1 //===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // Methods common to all machine instructions. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/CodeGen/MachineInstr.h" 15 #include "llvm/ADT/APFloat.h" 16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/FoldingSet.h" 18 #include "llvm/ADT/Hashing.h" 19 #include "llvm/ADT/None.h" 20 #include "llvm/ADT/STLExtras.h" 21 #include "llvm/ADT/SmallString.h" 22 #include "llvm/ADT/SmallVector.h" 23 #include "llvm/Analysis/AliasAnalysis.h" 24 #include "llvm/Analysis/Loads.h" 25 #include "llvm/Analysis/MemoryLocation.h" 26 #include "llvm/CodeGen/GlobalISel/RegisterBank.h" 27 #include "llvm/CodeGen/MachineBasicBlock.h" 28 #include "llvm/CodeGen/MachineFunction.h" 29 #include "llvm/CodeGen/MachineInstrBuilder.h" 30 #include "llvm/CodeGen/MachineInstrBundle.h" 31 #include "llvm/CodeGen/MachineMemOperand.h" 32 #include "llvm/CodeGen/MachineModuleInfo.h" 33 #include "llvm/CodeGen/MachineOperand.h" 34 #include "llvm/CodeGen/MachineRegisterInfo.h" 35 #include "llvm/CodeGen/PseudoSourceValue.h" 36 #include "llvm/IR/Constants.h" 37 #include "llvm/IR/DebugInfoMetadata.h" 38 #include "llvm/IR/DebugLoc.h" 39 #include "llvm/IR/DerivedTypes.h" 40 #include "llvm/IR/Function.h" 41 #include "llvm/IR/InlineAsm.h" 42 #include "llvm/IR/InstrTypes.h" 43 #include "llvm/IR/Intrinsics.h" 44 #include "llvm/IR/LLVMContext.h" 45 #include "llvm/IR/Metadata.h" 46 #include "llvm/IR/Module.h" 47 #include "llvm/IR/ModuleSlotTracker.h" 48 #include "llvm/IR/Type.h" 49 #include "llvm/IR/Value.h" 50 #include "llvm/MC/MCInstrDesc.h" 51 #include "llvm/MC/MCRegisterInfo.h" 52 #include "llvm/MC/MCSymbol.h" 53 
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <utility>

using namespace llvm;

// Command-line knob for register-mask printing: when set (the default), dump
// every register in a regmask operand instead of eliding after the first few.
static cl::opt<bool> PrintWholeRegMask(
    "print-whole-regmask",
    cl::desc("Print the full contents of regmask operands in IR dumps"),
    cl::init(true), cl::Hidden);

//===----------------------------------------------------------------------===//
// MachineOperand Implementation
//===----------------------------------------------------------------------===//

/// Change the register this operand refers to, keeping the owning function's
/// register use/def lists consistent when the operand is reachable from a
/// MachineFunction.
void MachineOperand::setReg(unsigned Reg) {
  if (getReg() == Reg) return; // No change.

  // Otherwise, we have to change the register.  If this operand is embedded
  // into a machine function, we need to update the old and new register's
  // use/def lists.
  if (MachineInstr *MI = getParent())
    if (MachineBasicBlock *MBB = MI->getParent())
      if (MachineFunction *MF = MBB->getParent()) {
        // Must unlink before mutating RegNo: the use list is keyed by the
        // current register number.
        MachineRegisterInfo &MRI = MF->getRegInfo();
        MRI.removeRegOperandFromUseList(this);
        SmallContents.RegNo = Reg;
        MRI.addRegOperandToUseList(this);
        return;
      }

  // Otherwise, just change the register, no problem.  :)
  SmallContents.RegNo = Reg;
}

/// Substitute the current virtual register with \p Reg, composing \p SubIdx
/// with any sub-register index already carried by this operand.
void MachineOperand::substVirtReg(unsigned Reg, unsigned SubIdx,
                                  const TargetRegisterInfo &TRI) {
  assert(TargetRegisterInfo::isVirtualRegister(Reg));
  if (SubIdx && getSubReg())
    SubIdx = TRI.composeSubRegIndices(SubIdx, getSubReg());
  setReg(Reg);
  if (SubIdx)
    setSubReg(SubIdx);
}

/// Substitute the current register with physical register \p Reg, resolving
/// any sub-register index into the concrete physical sub-register.
void MachineOperand::substPhysReg(unsigned Reg, const TargetRegisterInfo &TRI) {
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
  if (getSubReg()) {
    Reg = TRI.getSubReg(Reg, getSubReg());
    // Note that getSubReg() may return 0 if the sub-register doesn't exist.
    // That won't happen in legal code.
    setSubReg(0);
    // A physical sub-register def covers the whole register, so the
    // read-undef flag no longer applies.
    if (isDef())
      setIsUndef(false);
  }
  setReg(Reg);
}

/// Change a def to a use, or a use to a def.
void MachineOperand::setIsDef(bool Val) {
  assert(isReg() && "Wrong MachineOperand accessor");
  assert((!Val || !isDebug()) && "Marking a debug operation as def");
  if (IsDef == Val)
    return;
  // MRI may keep uses and defs in different list positions.
  if (MachineInstr *MI = getParent())
    if (MachineBasicBlock *MBB = MI->getParent())
      if (MachineFunction *MF = MBB->getParent()) {
        // Re-link so the operand lands in the correct (use vs. def) position.
        MachineRegisterInfo &MRI = MF->getRegInfo();
        MRI.removeRegOperandFromUseList(this);
        IsDef = Val;
        MRI.addRegOperandToUseList(this);
        return;
      }
  IsDef = Val;
}

// If this operand is currently a register operand, and if this is in a
// function, deregister the operand from the register's use/def list.
void MachineOperand::removeRegFromUses() {
  // Nothing to do unless this is a register operand that is actually linked
  // into a use/def list.
  if (!isReg() || !isOnRegUseList())
    return;

  if (MachineInstr *MI = getParent()) {
    if (MachineBasicBlock *MBB = MI->getParent()) {
      if (MachineFunction *MF = MBB->getParent())
        MF->getRegInfo().removeRegOperandFromUseList(this);
    }
  }
}

/// ChangeToImmediate - Replace this operand with a new immediate operand of
/// the specified value. If an operand is known to be an immediate already,
/// the setImm method should be used.
void MachineOperand::ChangeToImmediate(int64_t ImmVal) {
  assert((!isReg() || !isTied()) && "Cannot change a tied operand into an imm");

  // Drop any use/def-list membership before retagging the operand kind.
  removeRegFromUses();

  OpKind = MO_Immediate;
  Contents.ImmVal = ImmVal;
}

/// Replace this operand with a floating-point immediate operand.
void MachineOperand::ChangeToFPImmediate(const ConstantFP *FPImm) {
  assert((!isReg() || !isTied()) && "Cannot change a tied operand into an imm");

  removeRegFromUses();

  OpKind = MO_FPImmediate;
  Contents.CFP = FPImm;
}

/// Replace this operand with an external symbol operand for \p SymName.
void MachineOperand::ChangeToES(const char *SymName, unsigned char TargetFlags) {
  assert((!isReg() || !isTied()) &&
         "Cannot change a tied operand into an external symbol");

  removeRegFromUses();

  OpKind = MO_ExternalSymbol;
  Contents.OffsetedInfo.Val.SymbolName = SymName;
  setOffset(0); // Offset is always 0.
  setTargetFlags(TargetFlags);
}

/// Replace this operand with an MCSymbol operand.
void MachineOperand::ChangeToMCSymbol(MCSymbol *Sym) {
  assert((!isReg() || !isTied()) &&
         "Cannot change a tied operand into an MCSymbol");

  removeRegFromUses();

  OpKind = MO_MCSymbol;
  Contents.Sym = Sym;
}

/// Replace this operand with a frame-index operand for slot \p Idx.
void MachineOperand::ChangeToFrameIndex(int Idx) {
  assert((!isReg() || !isTied()) &&
         "Cannot change a tied operand into a FrameIndex");

  removeRegFromUses();

  OpKind = MO_FrameIndex;
  setIndex(Idx);
}

/// ChangeToRegister - Replace this operand with a new register operand of
/// the specified value. If an operand is known to be a register already,
/// the setReg method should be used.
void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp,
                                      bool isKill, bool isDead, bool isUndef,
                                      bool isDebug) {
  MachineRegisterInfo *RegInfo = nullptr;
  if (MachineInstr *MI = getParent())
    if (MachineBasicBlock *MBB = MI->getParent())
      if (MachineFunction *MF = MBB->getParent())
        RegInfo = &MF->getRegInfo();
  // If this operand is already a register operand, remove it from the
  // register's use/def lists.
  bool WasReg = isReg();
  if (RegInfo && WasReg)
    RegInfo->removeRegOperandFromUseList(this);

  // Change this to a register and set the reg#.
  OpKind = MO_Register;
  SmallContents.RegNo = Reg;
  SubReg_TargetFlags = 0;
  IsDef = isDef;
  IsImp = isImp;
  IsKill = isKill;
  IsDead = isDead;
  IsUndef = isUndef;
  IsInternalRead = false;
  IsEarlyClobber = false;
  IsDebug = isDebug;
  // Ensure isOnRegUseList() returns false.
  Contents.Reg.Prev = nullptr;
  // Preserve the tie when the operand was already a register.
  if (!WasReg)
    TiedTo = 0;

  // If this operand is embedded in a function, add the operand to the
  // register's use/def list.
  if (RegInfo)
    RegInfo->addRegOperandToUseList(this);
}

/// isIdenticalTo - Return true if this operand is identical to the specified
/// operand. Note that this should stay in sync with the hash_value overload
/// below.
bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
  if (getType() != Other.getType() ||
      getTargetFlags() != Other.getTargetFlags())
    return false;

  switch (getType()) {
  case MachineOperand::MO_Register:
    // Note: kill/dead/undef and similar flags are deliberately not compared.
    return getReg() == Other.getReg() && isDef() == Other.isDef() &&
           getSubReg() == Other.getSubReg();
  case MachineOperand::MO_Immediate:
    return getImm() == Other.getImm();
  case MachineOperand::MO_CImmediate:
    return getCImm() == Other.getCImm();
  case MachineOperand::MO_FPImmediate:
    return getFPImm() == Other.getFPImm();
  case MachineOperand::MO_MachineBasicBlock:
    return getMBB() == Other.getMBB();
  case MachineOperand::MO_FrameIndex:
    return getIndex() == Other.getIndex();
  case MachineOperand::MO_ConstantPoolIndex:
  case MachineOperand::MO_TargetIndex:
    return getIndex() == Other.getIndex() && getOffset() == Other.getOffset();
  case MachineOperand::MO_JumpTableIndex:
    return getIndex() == Other.getIndex();
  case MachineOperand::MO_GlobalAddress:
    return getGlobal() == Other.getGlobal() && getOffset() == Other.getOffset();
  case MachineOperand::MO_ExternalSymbol:
    // External symbols are compared by name, not by pointer identity.
    return strcmp(getSymbolName(), Other.getSymbolName()) == 0 &&
           getOffset() == Other.getOffset();
  case MachineOperand::MO_BlockAddress:
    return getBlockAddress() == Other.getBlockAddress() &&
           getOffset() == Other.getOffset();
  case MachineOperand::MO_RegisterMask:
  case MachineOperand::MO_RegisterLiveOut: {
    // Shallow compare of the two RegMasks
    const uint32_t *RegMask = getRegMask();
    const uint32_t *OtherRegMask = Other.getRegMask();
    if (RegMask == OtherRegMask)
      return true;

    // Calculate the size of the RegMask
    const MachineFunction *MF = getParent()->getParent()->getParent();
    const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
    unsigned RegMaskSize = (TRI->getNumRegs() + 31) / 32;

    // Deep compare of the two RegMasks
    return std::equal(RegMask, RegMask + RegMaskSize, OtherRegMask);
  }
  case MachineOperand::MO_MCSymbol:
    return getMCSymbol() == Other.getMCSymbol();
  case MachineOperand::MO_CFIIndex: {
    // CFI indices are per-function, so compare the referenced instructions,
    // not the raw indices.
    const MachineFunction *MF = getParent()->getParent()->getParent();
    const MachineFunction *OtherMF =
        Other.getParent()->getParent()->getParent();
    MCCFIInstruction Inst = MF->getFrameInstructions()[getCFIIndex()];
    MCCFIInstruction OtherInst =
        OtherMF->getFrameInstructions()[Other.getCFIIndex()];
    MCCFIInstruction::OpType op = Inst.getOperation();
    if (op != OtherInst.getOperation()) return false;
    // Only compare the register/offset fields that are meaningful for this
    // particular CFI operation.
    if (op == MCCFIInstruction::OpDefCfa || op == MCCFIInstruction::OpOffset ||
        op == MCCFIInstruction::OpRestore ||
        op == MCCFIInstruction::OpUndefined ||
        op == MCCFIInstruction::OpSameValue ||
        op == MCCFIInstruction::OpDefCfaRegister ||
        op == MCCFIInstruction::OpRelOffset ||
        op == MCCFIInstruction::OpRegister)
      if (Inst.getRegister() != OtherInst.getRegister()) return false;
    if (op == MCCFIInstruction::OpRegister)
      if (Inst.getRegister2() != OtherInst.getRegister2()) return false;
    if (op == MCCFIInstruction::OpDefCfa || op == MCCFIInstruction::OpOffset ||
        op == MCCFIInstruction::OpRelOffset ||
        op == MCCFIInstruction::OpDefCfaOffset ||
        op == MCCFIInstruction::OpAdjustCfaOffset ||
        op == MCCFIInstruction::OpGnuArgsSize)
      if (Inst.getOffset() != OtherInst.getOffset()) return false;
    return true;
  }
  case MachineOperand::MO_Metadata:
    return getMetadata() == Other.getMetadata();
  case MachineOperand::MO_IntrinsicID:
    return getIntrinsicID() == Other.getIntrinsicID();
  case MachineOperand::MO_Predicate:
    return getPredicate() == Other.getPredicate();
  }
  llvm_unreachable("Invalid machine operand type");
}

// Note: this must stay exactly in sync with isIdenticalTo above.
346 hash_code llvm::hash_value(const MachineOperand &MO) { 347 switch (MO.getType()) { 348 case MachineOperand::MO_Register: 349 // Register operands don't have target flags. 350 return hash_combine(MO.getType(), MO.getReg(), MO.getSubReg(), MO.isDef()); 351 case MachineOperand::MO_Immediate: 352 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm()); 353 case MachineOperand::MO_CImmediate: 354 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCImm()); 355 case MachineOperand::MO_FPImmediate: 356 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getFPImm()); 357 case MachineOperand::MO_MachineBasicBlock: 358 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMBB()); 359 case MachineOperand::MO_FrameIndex: 360 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex()); 361 case MachineOperand::MO_ConstantPoolIndex: 362 case MachineOperand::MO_TargetIndex: 363 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex(), 364 MO.getOffset()); 365 case MachineOperand::MO_JumpTableIndex: 366 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex()); 367 case MachineOperand::MO_ExternalSymbol: 368 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getOffset(), 369 MO.getSymbolName()); 370 case MachineOperand::MO_GlobalAddress: 371 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getGlobal(), 372 MO.getOffset()); 373 case MachineOperand::MO_BlockAddress: 374 return hash_combine(MO.getType(), MO.getTargetFlags(), 375 MO.getBlockAddress(), MO.getOffset()); 376 case MachineOperand::MO_RegisterMask: 377 case MachineOperand::MO_RegisterLiveOut: 378 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getRegMask()); 379 case MachineOperand::MO_Metadata: 380 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMetadata()); 381 case MachineOperand::MO_MCSymbol: 382 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMCSymbol()); 383 case MachineOperand::MO_CFIIndex: { 
384 const MachineFunction *MF = MO.getParent()->getParent()->getParent(); 385 MCCFIInstruction Inst = MF->getFrameInstructions()[MO.getCFIIndex()]; 386 return hash_combine(MO.getType(), MO.getTargetFlags(), Inst.getOperation(), 387 Inst.getRegister(), Inst.getRegister2(), 388 Inst.getOffset()); 389 } 390 case MachineOperand::MO_IntrinsicID: 391 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIntrinsicID()); 392 case MachineOperand::MO_Predicate: 393 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getPredicate()); 394 } 395 llvm_unreachable("Invalid machine operand type"); 396 } 397 398 void MachineOperand::print(raw_ostream &OS, const TargetRegisterInfo *TRI, 399 const TargetIntrinsicInfo *IntrinsicInfo) const { 400 ModuleSlotTracker DummyMST(nullptr); 401 print(OS, DummyMST, TRI, IntrinsicInfo); 402 } 403 404 void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST, 405 const TargetRegisterInfo *TRI, 406 const TargetIntrinsicInfo *IntrinsicInfo) const { 407 switch (getType()) { 408 case MachineOperand::MO_Register: 409 OS << PrintReg(getReg(), TRI, getSubReg()); 410 411 if (isDef() || isKill() || isDead() || isImplicit() || isUndef() || 412 isInternalRead() || isEarlyClobber() || isTied()) { 413 OS << '<'; 414 bool NeedComma = false; 415 if (isDef()) { 416 if (NeedComma) OS << ','; 417 if (isEarlyClobber()) 418 OS << "earlyclobber,"; 419 if (isImplicit()) 420 OS << "imp-"; 421 OS << "def"; 422 NeedComma = true; 423 // <def,read-undef> only makes sense when getSubReg() is set. 424 // Don't clutter the output otherwise. 
425 if (isUndef() && getSubReg()) 426 OS << ",read-undef"; 427 } else if (isImplicit()) { 428 OS << "imp-use"; 429 NeedComma = true; 430 } 431 432 if (isKill()) { 433 if (NeedComma) OS << ','; 434 OS << "kill"; 435 NeedComma = true; 436 } 437 if (isDead()) { 438 if (NeedComma) OS << ','; 439 OS << "dead"; 440 NeedComma = true; 441 } 442 if (isUndef() && isUse()) { 443 if (NeedComma) OS << ','; 444 OS << "undef"; 445 NeedComma = true; 446 } 447 if (isInternalRead()) { 448 if (NeedComma) OS << ','; 449 OS << "internal"; 450 NeedComma = true; 451 } 452 if (isTied()) { 453 if (NeedComma) OS << ','; 454 OS << "tied"; 455 if (TiedTo != 15) 456 OS << unsigned(TiedTo - 1); 457 } 458 OS << '>'; 459 } 460 break; 461 case MachineOperand::MO_Immediate: 462 OS << getImm(); 463 break; 464 case MachineOperand::MO_CImmediate: 465 getCImm()->getValue().print(OS, false); 466 break; 467 case MachineOperand::MO_FPImmediate: 468 if (getFPImm()->getType()->isFloatTy()) { 469 OS << getFPImm()->getValueAPF().convertToFloat(); 470 } else if (getFPImm()->getType()->isHalfTy()) { 471 APFloat APF = getFPImm()->getValueAPF(); 472 bool Unused; 473 APF.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, &Unused); 474 OS << "half " << APF.convertToFloat(); 475 } else if (getFPImm()->getType()->isFP128Ty()) { 476 APFloat APF = getFPImm()->getValueAPF(); 477 SmallString<16> Str; 478 getFPImm()->getValueAPF().toString(Str); 479 OS << "quad " << Str; 480 } else { 481 OS << getFPImm()->getValueAPF().convertToDouble(); 482 } 483 break; 484 case MachineOperand::MO_MachineBasicBlock: 485 OS << "<BB#" << getMBB()->getNumber() << ">"; 486 break; 487 case MachineOperand::MO_FrameIndex: 488 OS << "<fi#" << getIndex() << '>'; 489 break; 490 case MachineOperand::MO_ConstantPoolIndex: 491 OS << "<cp#" << getIndex(); 492 if (getOffset()) OS << "+" << getOffset(); 493 OS << '>'; 494 break; 495 case MachineOperand::MO_TargetIndex: 496 OS << "<ti#" << getIndex(); 497 if (getOffset()) OS << "+" << 
getOffset(); 498 OS << '>'; 499 break; 500 case MachineOperand::MO_JumpTableIndex: 501 OS << "<jt#" << getIndex() << '>'; 502 break; 503 case MachineOperand::MO_GlobalAddress: 504 OS << "<ga:"; 505 getGlobal()->printAsOperand(OS, /*PrintType=*/false, MST); 506 if (getOffset()) OS << "+" << getOffset(); 507 OS << '>'; 508 break; 509 case MachineOperand::MO_ExternalSymbol: 510 OS << "<es:" << getSymbolName(); 511 if (getOffset()) OS << "+" << getOffset(); 512 OS << '>'; 513 break; 514 case MachineOperand::MO_BlockAddress: 515 OS << '<'; 516 getBlockAddress()->printAsOperand(OS, /*PrintType=*/false, MST); 517 if (getOffset()) OS << "+" << getOffset(); 518 OS << '>'; 519 break; 520 case MachineOperand::MO_RegisterMask: { 521 unsigned NumRegsInMask = 0; 522 unsigned NumRegsEmitted = 0; 523 OS << "<regmask"; 524 for (unsigned i = 0; i < TRI->getNumRegs(); ++i) { 525 unsigned MaskWord = i / 32; 526 unsigned MaskBit = i % 32; 527 if (getRegMask()[MaskWord] & (1 << MaskBit)) { 528 if (PrintWholeRegMask || NumRegsEmitted <= 10) { 529 OS << " " << PrintReg(i, TRI); 530 NumRegsEmitted++; 531 } 532 NumRegsInMask++; 533 } 534 } 535 if (NumRegsEmitted != NumRegsInMask) 536 OS << " and " << (NumRegsInMask - NumRegsEmitted) << " more..."; 537 OS << ">"; 538 break; 539 } 540 case MachineOperand::MO_RegisterLiveOut: 541 OS << "<regliveout>"; 542 break; 543 case MachineOperand::MO_Metadata: 544 OS << '<'; 545 getMetadata()->printAsOperand(OS, MST); 546 OS << '>'; 547 break; 548 case MachineOperand::MO_MCSymbol: 549 OS << "<MCSym=" << *getMCSymbol() << '>'; 550 break; 551 case MachineOperand::MO_CFIIndex: 552 OS << "<call frame instruction>"; 553 break; 554 case MachineOperand::MO_IntrinsicID: { 555 Intrinsic::ID ID = getIntrinsicID(); 556 if (ID < Intrinsic::num_intrinsics) 557 OS << "<intrinsic:@" << Intrinsic::getName(ID, None) << '>'; 558 else if (IntrinsicInfo) 559 OS << "<intrinsic:@" << IntrinsicInfo->getName(ID) << '>'; 560 else 561 OS << "<intrinsic:" << ID << '>'; 562 break; 
563 } 564 case MachineOperand::MO_Predicate: { 565 auto Pred = static_cast<CmpInst::Predicate>(getPredicate()); 566 OS << '<' << (CmpInst::isIntPredicate(Pred) ? "intpred" : "floatpred") 567 << CmpInst::getPredicateName(Pred) << '>'; 568 break; 569 } 570 } 571 if (unsigned TF = getTargetFlags()) 572 OS << "[TF=" << TF << ']'; 573 } 574 575 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 576 LLVM_DUMP_METHOD void MachineOperand::dump() const { 577 dbgs() << *this << '\n'; 578 } 579 #endif 580 581 //===----------------------------------------------------------------------===// 582 // MachineMemOperand Implementation 583 //===----------------------------------------------------------------------===// 584 585 /// getAddrSpace - Return the LLVM IR address space number that this pointer 586 /// points into. 587 unsigned MachinePointerInfo::getAddrSpace() const { 588 if (V.isNull() || V.is<const PseudoSourceValue*>()) return 0; 589 return cast<PointerType>(V.get<const Value*>()->getType())->getAddressSpace(); 590 } 591 592 /// isDereferenceable - Return true if V is always dereferenceable for 593 /// Offset + Size byte. 594 bool MachinePointerInfo::isDereferenceable(unsigned Size, LLVMContext &C, 595 const DataLayout &DL) const { 596 if (!V.is<const Value*>()) 597 return false; 598 599 const Value *BasePtr = V.get<const Value*>(); 600 if (BasePtr == nullptr) 601 return false; 602 603 return isDereferenceableAndAlignedPointer(BasePtr, 1, 604 APInt(DL.getPointerSize(), 605 Offset + Size), 606 DL); 607 } 608 609 /// getConstantPool - Return a MachinePointerInfo record that refers to the 610 /// constant pool. 611 MachinePointerInfo MachinePointerInfo::getConstantPool(MachineFunction &MF) { 612 return MachinePointerInfo(MF.getPSVManager().getConstantPool()); 613 } 614 615 /// getFixedStack - Return a MachinePointerInfo record that refers to the 616 /// the specified FrameIndex. 
MachinePointerInfo MachinePointerInfo::getFixedStack(MachineFunction &MF,
                                                     int FI, int64_t Offset) {
  return MachinePointerInfo(MF.getPSVManager().getFixedStack(FI), Offset);
}

/// Return a MachinePointerInfo record that refers to a jump table entry.
MachinePointerInfo MachinePointerInfo::getJumpTable(MachineFunction &MF) {
  return MachinePointerInfo(MF.getPSVManager().getJumpTable());
}

/// Return a MachinePointerInfo record that refers to a GOT entry.
MachinePointerInfo MachinePointerInfo::getGOT(MachineFunction &MF) {
  return MachinePointerInfo(MF.getPSVManager().getGOT());
}

/// Return a MachinePointerInfo record for the stack at the given offset.
MachinePointerInfo MachinePointerInfo::getStack(MachineFunction &MF,
                                                int64_t Offset) {
  return MachinePointerInfo(MF.getPSVManager().getStack(), Offset);
}

/// Construct a MachineMemOperand. The alignment \p a must be a power of two;
/// it is stored as log2(a)+1 in the BaseAlignLog2 bitfield. Atomic properties
/// are packed into AtomicInfo bitfields, with asserts catching truncation.
MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
                                     uint64_t s, unsigned int a,
                                     const AAMDNodes &AAInfo,
                                     const MDNode *Ranges,
                                     SynchronizationScope SynchScope,
                                     AtomicOrdering Ordering,
                                     AtomicOrdering FailureOrdering)
    : PtrInfo(ptrinfo), Size(s), FlagVals(f), BaseAlignLog2(Log2_32(a) + 1),
      AAInfo(AAInfo), Ranges(Ranges) {
  assert((PtrInfo.V.isNull() || PtrInfo.V.is<const PseudoSourceValue*>() ||
          isa<PointerType>(PtrInfo.V.get<const Value*>()->getType())) &&
         "invalid pointer value");
  // Round-trips only when 'a' is a power of two.
  assert(getBaseAlignment() == a && "Alignment is not a power of 2!");
  assert((isLoad() || isStore()) && "Not a load/store!");

  AtomicInfo.SynchScope = static_cast<unsigned>(SynchScope);
  assert(getSynchScope() == SynchScope && "Value truncated");
  AtomicInfo.Ordering = static_cast<unsigned>(Ordering);
  assert(getOrdering() == Ordering && "Value truncated");
  AtomicInfo.FailureOrdering = static_cast<unsigned>(FailureOrdering);
  assert(getFailureOrdering() == FailureOrdering && "Value truncated");
}

/// Profile - Gather unique data for the object.
///
void MachineMemOperand::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(getOffset());
  ID.AddInteger(Size);
  ID.AddPointer(getOpaqueValue());
  ID.AddInteger(getFlags());
  ID.AddInteger(getBaseAlignment());
}

/// Merge alignment (and base/offset) information from \p MMO into this
/// operand, keeping the stronger of the two alignments.
void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
  // The Value and Offset may differ due to CSE. But the flags and size
  // should be the same.
  assert(MMO->getFlags() == getFlags() && "Flags mismatch!");
  assert(MMO->getSize() == getSize() && "Size mismatch!");

  if (MMO->getBaseAlignment() >= getBaseAlignment()) {
    // Update the alignment value.
    BaseAlignLog2 = Log2_32(MMO->getBaseAlignment()) + 1;
    // Also update the base and offset, because the new alignment may
    // not be applicable with the old ones.
    PtrInfo = MMO->PtrInfo;
  }
}

/// getAlignment - Return the minimum known alignment in bytes of the
/// actual memory reference.
uint64_t MachineMemOperand::getAlignment() const {
  // The offset can reduce the effective alignment below the base alignment.
  return MinAlign(getBaseAlignment(), getOffset());
}

void MachineMemOperand::print(raw_ostream &OS) const {
  ModuleSlotTracker DummyMST(nullptr);
  print(OS, DummyMST);
}

/// Print this memory operand in the textual dump style, e.g.
/// "LD4[%ptr+8](align=4)(tbaa=...)".
void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST) const {
  assert((isLoad() || isStore()) &&
         "SV has to be a load, store or both.");

  if (isVolatile())
    OS << "Volatile ";

  if (isLoad())
    OS << "LD";
  if (isStore())
    OS << "ST";
  OS << getSize();

  // Print the address information.
  OS << "[";
  if (const Value *V = getValue())
    V->printAsOperand(OS, /*PrintType=*/false, MST);
  else if (const PseudoSourceValue *PSV = getPseudoValue())
    PSV->printCustom(OS);
  else
    OS << "<unknown>";

  unsigned AS = getAddrSpace();
  if (AS != 0)
    OS << "(addrspace=" << AS << ')';

  // If the alignment of the memory reference itself differs from the alignment
  // of the base pointer, print the base alignment explicitly, next to the base
  // pointer.
  if (getBaseAlignment() != getAlignment())
    OS << "(align=" << getBaseAlignment() << ")";

  if (getOffset() != 0)
    OS << "+" << getOffset();
  OS << "]";

  // Print the alignment of the reference.
  if (getBaseAlignment() != getAlignment() || getBaseAlignment() != getSize())
    OS << "(align=" << getAlignment() << ")";

  // Print TBAA info.
  if (const MDNode *TBAAInfo = getAAInfo().TBAA) {
    OS << "(tbaa=";
    if (TBAAInfo->getNumOperands() > 0)
      TBAAInfo->getOperand(0)->printAsOperand(OS, MST);
    else
      OS << "<unknown>";
    OS << ")";
  }

  // Print AA scope info.
  if (const MDNode *ScopeInfo = getAAInfo().Scope) {
    OS << "(alias.scope=";
    if (ScopeInfo->getNumOperands() > 0)
      for (unsigned i = 0, ie = ScopeInfo->getNumOperands(); i != ie; ++i) {
        ScopeInfo->getOperand(i)->printAsOperand(OS, MST);
        if (i != ie-1)
          OS << ",";
      }
    else
      OS << "<unknown>";
    OS << ")";
  }

  // Print AA noalias scope info.
  if (const MDNode *NoAliasInfo = getAAInfo().NoAlias) {
    OS << "(noalias=";
    if (NoAliasInfo->getNumOperands() > 0)
      for (unsigned i = 0, ie = NoAliasInfo->getNumOperands(); i != ie; ++i) {
        NoAliasInfo->getOperand(i)->printAsOperand(OS, MST);
        if (i != ie-1)
          OS << ",";
      }
    else
      OS << "<unknown>";
    OS << ")";
  }

  if (isNonTemporal())
    OS << "(nontemporal)";
  if (isDereferenceable())
    OS << "(dereferenceable)";
  if (isInvariant())
    OS << "(invariant)";
}

//===----------------------------------------------------------------------===//
// MachineInstr Implementation
//===----------------------------------------------------------------------===//

/// Append the implicit def and use register operands declared by this
/// instruction's MCInstrDesc.
void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
  if (MCID->ImplicitDefs)
    for (const MCPhysReg *ImpDefs = MCID->getImplicitDefs(); *ImpDefs;
         ++ImpDefs)
      addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
  if (MCID->ImplicitUses)
    for (const MCPhysReg *ImpUses = MCID->getImplicitUses(); *ImpUses;
         ++ImpUses)
      addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
}

/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified by
/// the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
                           DebugLoc dl, bool NoImp)
    : MCID(&tid), debugLoc(std::move(dl)) {
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Reserve space for the expected number of operands.
  if (unsigned NumOps = MCID->getNumOperands() +
                        MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
    CapOperands = OperandCapacity::get(NumOps);
    Operands = MF.allocateOperandArray(CapOperands);
  }

  if (!NoImp)
    addImplicitDefUseOperands(MF);
}

/// MachineInstr ctor - Copies MachineInstr arg exactly
///
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
    : MCID(&MI.getDesc()), NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
      debugLoc(MI.getDebugLoc()) {
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (const MachineOperand &MO : MI.operands())
    addOperand(MF, MO);

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}

/// getRegInfo - If this instruction is embedded into a MachineFunction,
/// return the MachineRegisterInfo object for the current function, otherwise
/// return null.
MachineRegisterInfo *MachineInstr::getRegInfo() {
  if (MachineBasicBlock *MBB = getParent())
    return &MBB->getParent()->getRegInfo();
  return nullptr;
}

/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
/// this instruction from their respective use lists. This requires that the
/// operands already be on their use lists.
void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
  for (MachineOperand &MO : operands())
    if (MO.isReg())
      MRI.removeRegOperandFromUseList(&MO);
}

/// AddRegOperandsToUseLists - Add all of the register operands in
/// this instruction to their respective use lists. This requires that the
/// operands not be on their use lists yet.
void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
  for (MachineOperand &MO : operands())
    if (MO.isReg())
      MRI.addRegOperandToUseList(&MO);
}

/// Convenience overload: add an operand to an instruction that is already
/// embedded in a MachineFunction.
void MachineInstr::addOperand(const MachineOperand &Op) {
  MachineBasicBlock *MBB = getParent();
  assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
  addOperand(*MF, Op);
}

/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
/// ranges. If MRI is non-null also update use-def chains.
static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
                         unsigned NumOps, MachineRegisterInfo *MRI) {
  if (MRI)
    return MRI->moveOperands(Dst, Src, NumOps);

  // MachineOperand is a trivially copyable type so we can just use memmove.
  std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
}

/// addOperand - Add the specified operand to the instruction. If it is an
/// implicit operand, it is added to the end of the operand list. If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
  assert(MCID && "Cannot add operands before providing an instr descriptor");

  // Check if we're adding one of our existing operands.
  if (&Op >= Operands && &Op < Operands + NumOperands) {
    // This is unusual: MI->addOperand(MI->getOperand(i)).
    // If adding Op requires reallocating or moving existing operands around,
    // the Op reference could go stale. Support it by copying Op.
    MachineOperand CopyOp(Op);
    return addOperand(MF, CopyOp);
  }

  // Find the insert location for the new operand. Implicit registers go at
  // the end, everything else goes before the implicit regs.
  //
  // FIXME: Allow mixed explicit and implicit operands on inline asm.
  // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
  // implicit-defs, but they must not be moved around. See the FIXME in
  // InstrEmitter.cpp.
  unsigned OpNo = getNumOperands();
  bool isImpReg = Op.isReg() && Op.isImplicit();
  if (!isImpReg && !isInlineAsm()) {
    // Walk back over trailing implicit register operands so the explicit
    // operand is inserted before them.
    while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
      --OpNo;
      assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
    }
  }

#ifndef NDEBUG
  bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
  // OpNo now points as the desired insertion point. Unless this is a variadic
  // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
  // RegMask operands go between the explicit and implicit operands.
  assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
          OpNo < MCID->getNumOperands() || isMetaDataOp) &&
         "Trying to add an operand to a machine instr that is already done!");
#endif

  MachineRegisterInfo *MRI = getRegInfo();

  // Determine if the Operands array needs to be reallocated.
  // Save the old capacity and operand array.
  OperandCapacity OldCap = CapOperands;
  MachineOperand *OldOperands = Operands;
  if (!OldOperands || OldCap.getSize() == getNumOperands()) {
    CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
    Operands = MF.allocateOperandArray(CapOperands);
    // Move the operands before the insertion point.
    if (OpNo)
      moveOperands(Operands, OldOperands, OpNo, MRI);
  }

  // Move the operands following the insertion point.
  if (OpNo != NumOperands)
    moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
                 MRI);
  ++NumOperands;

  // Deallocate the old operand array.
  if (OldOperands != Operands && OldOperands)
    MF.deallocateOperandArray(OldCap, OldOperands);

  // Copy Op into place. It still needs to be inserted into the MRI use lists.
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;

  // When adding a register operand, tell MRI about it.
  if (NewMO->isReg()) {
    // Ensure isOnRegUseList() returns false, regardless of Op's status.
    NewMO->Contents.Reg.Prev = nullptr;
    // Ignore existing ties. This is not a property that can be copied.
    NewMO->TiedTo = 0;
    // Add the new operand to MRI, but only for instructions in an MBB.
    if (MRI)
      MRI->addRegOperandToUseList(NewMO);
    // The MCID operand information isn't accurate until we start adding
    // explicit operands. The implicit operands are added first, then the
    // explicits are inserted before them.
    if (!isImpReg) {
      // Tie uses to defs as indicated in MCInstrDesc.
      if (NewMO->isUse()) {
        int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
        if (DefIdx != -1)
          tieOperands(DefIdx, OpNo);
      }
      // If the register operand is flagged as early, mark the operand as such.
      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
        NewMO->setIsEarlyClobber(true);
    }
  }
}

/// RemoveOperand - Erase an operand from an instruction, leaving it with one
/// fewer operand than it started with.
///
void MachineInstr::RemoveOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
982 for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i) 983 if (Operands[i].isReg()) 984 assert(!Operands[i].isTied() && "Cannot move tied operands"); 985 #endif 986 987 MachineRegisterInfo *MRI = getRegInfo(); 988 if (MRI && Operands[OpNo].isReg()) 989 MRI->removeRegOperandFromUseList(Operands + OpNo); 990 991 // Don't call the MachineOperand destructor. A lot of this code depends on 992 // MachineOperand having a trivial destructor anyway, and adding a call here 993 // wouldn't make it 'destructor-correct'. 994 995 if (unsigned N = NumOperands - 1 - OpNo) 996 moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI); 997 --NumOperands; 998 } 999 1000 /// addMemOperand - Add a MachineMemOperand to the machine instruction. 1001 /// This function should be used only occasionally. The setMemRefs function 1002 /// is the primary method for setting up a MachineInstr's MemRefs list. 1003 void MachineInstr::addMemOperand(MachineFunction &MF, 1004 MachineMemOperand *MO) { 1005 mmo_iterator OldMemRefs = MemRefs; 1006 unsigned OldNumMemRefs = NumMemRefs; 1007 1008 unsigned NewNum = NumMemRefs + 1; 1009 mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum); 1010 1011 std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs); 1012 NewMemRefs[NewNum - 1] = MO; 1013 setMemRefs(NewMemRefs, NewMemRefs + NewNum); 1014 } 1015 1016 /// Check to see if the MMOs pointed to by the two MemRefs arrays are 1017 /// identical. 
1018 static bool hasIdenticalMMOs(const MachineInstr &MI1, const MachineInstr &MI2) { 1019 auto I1 = MI1.memoperands_begin(), E1 = MI1.memoperands_end(); 1020 auto I2 = MI2.memoperands_begin(), E2 = MI2.memoperands_end(); 1021 if ((E1 - I1) != (E2 - I2)) 1022 return false; 1023 for (; I1 != E1; ++I1, ++I2) { 1024 if (**I1 != **I2) 1025 return false; 1026 } 1027 return true; 1028 } 1029 1030 std::pair<MachineInstr::mmo_iterator, unsigned> 1031 MachineInstr::mergeMemRefsWith(const MachineInstr& Other) { 1032 1033 // If either of the incoming memrefs are empty, we must be conservative and 1034 // treat this as if we've exhausted our space for memrefs and dropped them. 1035 if (memoperands_empty() || Other.memoperands_empty()) 1036 return std::make_pair(nullptr, 0); 1037 1038 // If both instructions have identical memrefs, we don't need to merge them. 1039 // Since many instructions have a single memref, and we tend to merge things 1040 // like pairs of loads from the same location, this catches a large number of 1041 // cases in practice. 1042 if (hasIdenticalMMOs(*this, Other)) 1043 return std::make_pair(MemRefs, NumMemRefs); 1044 1045 // TODO: consider uniquing elements within the operand lists to reduce 1046 // space usage and fall back to conservative information less often. 1047 size_t CombinedNumMemRefs = NumMemRefs + Other.NumMemRefs; 1048 1049 // If we don't have enough room to store this many memrefs, be conservative 1050 // and drop them. Otherwise, we'd fail asserts when trying to add them to 1051 // the new instruction. 
1052 if (CombinedNumMemRefs != uint8_t(CombinedNumMemRefs)) 1053 return std::make_pair(nullptr, 0); 1054 1055 MachineFunction *MF = getParent()->getParent(); 1056 mmo_iterator MemBegin = MF->allocateMemRefsArray(CombinedNumMemRefs); 1057 mmo_iterator MemEnd = std::copy(memoperands_begin(), memoperands_end(), 1058 MemBegin); 1059 MemEnd = std::copy(Other.memoperands_begin(), Other.memoperands_end(), 1060 MemEnd); 1061 assert(MemEnd - MemBegin == (ptrdiff_t)CombinedNumMemRefs && 1062 "missing memrefs"); 1063 1064 return std::make_pair(MemBegin, CombinedNumMemRefs); 1065 } 1066 1067 bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const { 1068 assert(!isBundledWithPred() && "Must be called on bundle header"); 1069 for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) { 1070 if (MII->getDesc().getFlags() & Mask) { 1071 if (Type == AnyInBundle) 1072 return true; 1073 } else { 1074 if (Type == AllInBundle && !MII->isBundle()) 1075 return false; 1076 } 1077 // This was the last instruction in the bundle. 1078 if (!MII->isBundledWithSucc()) 1079 return Type == AllInBundle; 1080 } 1081 } 1082 1083 bool MachineInstr::isIdenticalTo(const MachineInstr &Other, 1084 MICheckType Check) const { 1085 // If opcodes or number of operands are not the same then the two 1086 // instructions are obviously not identical. 1087 if (Other.getOpcode() != getOpcode() || 1088 Other.getNumOperands() != getNumOperands()) 1089 return false; 1090 1091 if (isBundle()) { 1092 // We have passed the test above that both instructions have the same 1093 // opcode, so we know that both instructions are bundles here. Let's compare 1094 // MIs inside the bundle. 
1095 assert(Other.isBundle() && "Expected that both instructions are bundles."); 1096 MachineBasicBlock::const_instr_iterator I1 = getIterator(); 1097 MachineBasicBlock::const_instr_iterator I2 = Other.getIterator(); 1098 // Loop until we analysed the last intruction inside at least one of the 1099 // bundles. 1100 while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) { 1101 ++I1; 1102 ++I2; 1103 if (!I1->isIdenticalTo(*I2, Check)) 1104 return false; 1105 } 1106 // If we've reached the end of just one of the two bundles, but not both, 1107 // the instructions are not identical. 1108 if (I1->isBundledWithSucc() || I2->isBundledWithSucc()) 1109 return false; 1110 } 1111 1112 // Check operands to make sure they match. 1113 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 1114 const MachineOperand &MO = getOperand(i); 1115 const MachineOperand &OMO = Other.getOperand(i); 1116 if (!MO.isReg()) { 1117 if (!MO.isIdenticalTo(OMO)) 1118 return false; 1119 continue; 1120 } 1121 1122 // Clients may or may not want to ignore defs when testing for equality. 1123 // For example, machine CSE pass only cares about finding common 1124 // subexpressions, so it's safe to ignore virtual register defs. 1125 if (MO.isDef()) { 1126 if (Check == IgnoreDefs) 1127 continue; 1128 else if (Check == IgnoreVRegDefs) { 1129 if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()) || 1130 TargetRegisterInfo::isPhysicalRegister(OMO.getReg())) 1131 if (MO.getReg() != OMO.getReg()) 1132 return false; 1133 } else { 1134 if (!MO.isIdenticalTo(OMO)) 1135 return false; 1136 if (Check == CheckKillDead && MO.isDead() != OMO.isDead()) 1137 return false; 1138 } 1139 } else { 1140 if (!MO.isIdenticalTo(OMO)) 1141 return false; 1142 if (Check == CheckKillDead && MO.isKill() != OMO.isKill()) 1143 return false; 1144 } 1145 } 1146 // If DebugLoc does not match then two dbg.values are not identical. 
1147 if (isDebugValue()) 1148 if (getDebugLoc() && Other.getDebugLoc() && 1149 getDebugLoc() != Other.getDebugLoc()) 1150 return false; 1151 return true; 1152 } 1153 1154 MachineInstr *MachineInstr::removeFromParent() { 1155 assert(getParent() && "Not embedded in a basic block!"); 1156 return getParent()->remove(this); 1157 } 1158 1159 MachineInstr *MachineInstr::removeFromBundle() { 1160 assert(getParent() && "Not embedded in a basic block!"); 1161 return getParent()->remove_instr(this); 1162 } 1163 1164 void MachineInstr::eraseFromParent() { 1165 assert(getParent() && "Not embedded in a basic block!"); 1166 getParent()->erase(this); 1167 } 1168 1169 void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() { 1170 assert(getParent() && "Not embedded in a basic block!"); 1171 MachineBasicBlock *MBB = getParent(); 1172 MachineFunction *MF = MBB->getParent(); 1173 assert(MF && "Not embedded in a function!"); 1174 1175 MachineInstr *MI = (MachineInstr *)this; 1176 MachineRegisterInfo &MRI = MF->getRegInfo(); 1177 1178 for (const MachineOperand &MO : MI->operands()) { 1179 if (!MO.isReg() || !MO.isDef()) 1180 continue; 1181 unsigned Reg = MO.getReg(); 1182 if (!TargetRegisterInfo::isVirtualRegister(Reg)) 1183 continue; 1184 MRI.markUsesInDebugValueAsUndef(Reg); 1185 } 1186 MI->eraseFromParent(); 1187 } 1188 1189 void MachineInstr::eraseFromBundle() { 1190 assert(getParent() && "Not embedded in a basic block!"); 1191 getParent()->erase_instr(this); 1192 } 1193 1194 /// getNumExplicitOperands - Returns the number of non-implicit operands. 
1195 /// 1196 unsigned MachineInstr::getNumExplicitOperands() const { 1197 unsigned NumOperands = MCID->getNumOperands(); 1198 if (!MCID->isVariadic()) 1199 return NumOperands; 1200 1201 for (unsigned i = NumOperands, e = getNumOperands(); i != e; ++i) { 1202 const MachineOperand &MO = getOperand(i); 1203 if (!MO.isReg() || !MO.isImplicit()) 1204 NumOperands++; 1205 } 1206 return NumOperands; 1207 } 1208 1209 void MachineInstr::bundleWithPred() { 1210 assert(!isBundledWithPred() && "MI is already bundled with its predecessor"); 1211 setFlag(BundledPred); 1212 MachineBasicBlock::instr_iterator Pred = getIterator(); 1213 --Pred; 1214 assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags"); 1215 Pred->setFlag(BundledSucc); 1216 } 1217 1218 void MachineInstr::bundleWithSucc() { 1219 assert(!isBundledWithSucc() && "MI is already bundled with its successor"); 1220 setFlag(BundledSucc); 1221 MachineBasicBlock::instr_iterator Succ = getIterator(); 1222 ++Succ; 1223 assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags"); 1224 Succ->setFlag(BundledPred); 1225 } 1226 1227 void MachineInstr::unbundleFromPred() { 1228 assert(isBundledWithPred() && "MI isn't bundled with its predecessor"); 1229 clearFlag(BundledPred); 1230 MachineBasicBlock::instr_iterator Pred = getIterator(); 1231 --Pred; 1232 assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags"); 1233 Pred->clearFlag(BundledSucc); 1234 } 1235 1236 void MachineInstr::unbundleFromSucc() { 1237 assert(isBundledWithSucc() && "MI isn't bundled with its successor"); 1238 clearFlag(BundledSucc); 1239 MachineBasicBlock::instr_iterator Succ = getIterator(); 1240 ++Succ; 1241 assert(Succ->isBundledWithPred() && "Inconsistent bundle flags"); 1242 Succ->clearFlag(BundledPred); 1243 } 1244 1245 bool MachineInstr::isStackAligningInlineAsm() const { 1246 if (isInlineAsm()) { 1247 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); 1248 if (ExtraInfo & InlineAsm::Extra_IsAlignStack) 1249 
return true; 1250 } 1251 return false; 1252 } 1253 1254 InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const { 1255 assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!"); 1256 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); 1257 return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0); 1258 } 1259 1260 int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx, 1261 unsigned *GroupNo) const { 1262 assert(isInlineAsm() && "Expected an inline asm instruction"); 1263 assert(OpIdx < getNumOperands() && "OpIdx out of range"); 1264 1265 // Ignore queries about the initial operands. 1266 if (OpIdx < InlineAsm::MIOp_FirstOperand) 1267 return -1; 1268 1269 unsigned Group = 0; 1270 unsigned NumOps; 1271 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e; 1272 i += NumOps) { 1273 const MachineOperand &FlagMO = getOperand(i); 1274 // If we reach the implicit register operands, stop looking. 1275 if (!FlagMO.isImm()) 1276 return -1; 1277 NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm()); 1278 if (i + NumOps > OpIdx) { 1279 if (GroupNo) 1280 *GroupNo = Group; 1281 return i; 1282 } 1283 ++Group; 1284 } 1285 return -1; 1286 } 1287 1288 const DILocalVariable *MachineInstr::getDebugVariable() const { 1289 assert(isDebugValue() && "not a DBG_VALUE"); 1290 return cast<DILocalVariable>(getOperand(2).getMetadata()); 1291 } 1292 1293 const DIExpression *MachineInstr::getDebugExpression() const { 1294 assert(isDebugValue() && "not a DBG_VALUE"); 1295 return cast<DIExpression>(getOperand(3).getMetadata()); 1296 } 1297 1298 const TargetRegisterClass* 1299 MachineInstr::getRegClassConstraint(unsigned OpIdx, 1300 const TargetInstrInfo *TII, 1301 const TargetRegisterInfo *TRI) const { 1302 assert(getParent() && "Can't have an MBB reference here!"); 1303 assert(getParent()->getParent() && "Can't have an MF reference here!"); 1304 const MachineFunction &MF = *getParent()->getParent(); 
1305 1306 // Most opcodes have fixed constraints in their MCInstrDesc. 1307 if (!isInlineAsm()) 1308 return TII->getRegClass(getDesc(), OpIdx, TRI, MF); 1309 1310 if (!getOperand(OpIdx).isReg()) 1311 return nullptr; 1312 1313 // For tied uses on inline asm, get the constraint from the def. 1314 unsigned DefIdx; 1315 if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx)) 1316 OpIdx = DefIdx; 1317 1318 // Inline asm stores register class constraints in the flag word. 1319 int FlagIdx = findInlineAsmFlagIdx(OpIdx); 1320 if (FlagIdx < 0) 1321 return nullptr; 1322 1323 unsigned Flag = getOperand(FlagIdx).getImm(); 1324 unsigned RCID; 1325 if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse || 1326 InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef || 1327 InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) && 1328 InlineAsm::hasRegClassConstraint(Flag, RCID)) 1329 return TRI->getRegClass(RCID); 1330 1331 // Assume that all registers in a memory operand are pointers. 1332 if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem) 1333 return TRI->getPointerRegClass(MF); 1334 1335 return nullptr; 1336 } 1337 1338 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg( 1339 unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, 1340 const TargetRegisterInfo *TRI, bool ExploreBundle) const { 1341 // Check every operands inside the bundle if we have 1342 // been asked to. 1343 if (ExploreBundle) 1344 for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC; 1345 ++OpndIt) 1346 CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl( 1347 OpndIt.getOperandNo(), Reg, CurRC, TII, TRI); 1348 else 1349 // Otherwise, just check the current operands. 
1350 for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i) 1351 CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI); 1352 return CurRC; 1353 } 1354 1355 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl( 1356 unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC, 1357 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const { 1358 assert(CurRC && "Invalid initial register class"); 1359 // Check if Reg is constrained by some of its use/def from MI. 1360 const MachineOperand &MO = getOperand(OpIdx); 1361 if (!MO.isReg() || MO.getReg() != Reg) 1362 return CurRC; 1363 // If yes, accumulate the constraints through the operand. 1364 return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI); 1365 } 1366 1367 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect( 1368 unsigned OpIdx, const TargetRegisterClass *CurRC, 1369 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const { 1370 const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI); 1371 const MachineOperand &MO = getOperand(OpIdx); 1372 assert(MO.isReg() && 1373 "Cannot get register constraints for non-register operand"); 1374 assert(CurRC && "Invalid initial register class"); 1375 if (unsigned SubIdx = MO.getSubReg()) { 1376 if (OpRC) 1377 CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx); 1378 else 1379 CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx); 1380 } else if (OpRC) 1381 CurRC = TRI->getCommonSubClass(CurRC, OpRC); 1382 return CurRC; 1383 } 1384 1385 /// Return the number of instructions inside the MI bundle, not counting the 1386 /// header instruction. 
1387 unsigned MachineInstr::getBundleSize() const { 1388 MachineBasicBlock::const_instr_iterator I = getIterator(); 1389 unsigned Size = 0; 1390 while (I->isBundledWithSucc()) { 1391 ++Size; 1392 ++I; 1393 } 1394 return Size; 1395 } 1396 1397 /// Returns true if the MachineInstr has an implicit-use operand of exactly 1398 /// the given register (not considering sub/super-registers). 1399 bool MachineInstr::hasRegisterImplicitUseOperand(unsigned Reg) const { 1400 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 1401 const MachineOperand &MO = getOperand(i); 1402 if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg) 1403 return true; 1404 } 1405 return false; 1406 } 1407 1408 /// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of 1409 /// the specific register or -1 if it is not found. It further tightens 1410 /// the search criteria to a use that kills the register if isKill is true. 1411 int MachineInstr::findRegisterUseOperandIdx( 1412 unsigned Reg, bool isKill, const TargetRegisterInfo *TRI) const { 1413 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 1414 const MachineOperand &MO = getOperand(i); 1415 if (!MO.isReg() || !MO.isUse()) 1416 continue; 1417 unsigned MOReg = MO.getReg(); 1418 if (!MOReg) 1419 continue; 1420 if (MOReg == Reg || (TRI && TargetRegisterInfo::isPhysicalRegister(MOReg) && 1421 TargetRegisterInfo::isPhysicalRegister(Reg) && 1422 TRI->isSubRegister(MOReg, Reg))) 1423 if (!isKill || MO.isKill()) 1424 return i; 1425 } 1426 return -1; 1427 } 1428 1429 /// readsWritesVirtualRegister - Return a pair of bools (reads, writes) 1430 /// indicating if this instruction reads or writes Reg. This also considers 1431 /// partial defines. 1432 std::pair<bool,bool> 1433 MachineInstr::readsWritesVirtualRegister(unsigned Reg, 1434 SmallVectorImpl<unsigned> *Ops) const { 1435 bool PartDef = false; // Partial redefine. 1436 bool FullDef = false; // Full define. 
1437 bool Use = false; 1438 1439 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 1440 const MachineOperand &MO = getOperand(i); 1441 if (!MO.isReg() || MO.getReg() != Reg) 1442 continue; 1443 if (Ops) 1444 Ops->push_back(i); 1445 if (MO.isUse()) 1446 Use |= !MO.isUndef(); 1447 else if (MO.getSubReg() && !MO.isUndef()) 1448 // A partial <def,undef> doesn't count as reading the register. 1449 PartDef = true; 1450 else 1451 FullDef = true; 1452 } 1453 // A partial redefine uses Reg unless there is also a full define. 1454 return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef); 1455 } 1456 1457 /// findRegisterDefOperandIdx() - Returns the operand index that is a def of 1458 /// the specified register or -1 if it is not found. If isDead is true, defs 1459 /// that are not dead are skipped. If TargetRegisterInfo is non-null, then it 1460 /// also checks if there is a def of a super-register. 1461 int 1462 MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap, 1463 const TargetRegisterInfo *TRI) const { 1464 bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg); 1465 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 1466 const MachineOperand &MO = getOperand(i); 1467 // Accept regmask operands when Overlap is set. 1468 // Ignore them when looking for a specific def operand (Overlap == false). 
1469 if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg)) 1470 return i; 1471 if (!MO.isReg() || !MO.isDef()) 1472 continue; 1473 unsigned MOReg = MO.getReg(); 1474 bool Found = (MOReg == Reg); 1475 if (!Found && TRI && isPhys && 1476 TargetRegisterInfo::isPhysicalRegister(MOReg)) { 1477 if (Overlap) 1478 Found = TRI->regsOverlap(MOReg, Reg); 1479 else 1480 Found = TRI->isSubRegister(MOReg, Reg); 1481 } 1482 if (Found && (!isDead || MO.isDead())) 1483 return i; 1484 } 1485 return -1; 1486 } 1487 1488 /// findFirstPredOperandIdx() - Find the index of the first operand in the 1489 /// operand list that is used to represent the predicate. It returns -1 if 1490 /// none is found. 1491 int MachineInstr::findFirstPredOperandIdx() const { 1492 // Don't call MCID.findFirstPredOperandIdx() because this variant 1493 // is sometimes called on an instruction that's not yet complete, and 1494 // so the number of operands is less than the MCID indicates. In 1495 // particular, the PTX target does this. 1496 const MCInstrDesc &MCID = getDesc(); 1497 if (MCID.isPredicable()) { 1498 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) 1499 if (MCID.OpInfo[i].isPredicate()) 1500 return i; 1501 } 1502 1503 return -1; 1504 } 1505 1506 // MachineOperand::TiedTo is 4 bits wide. 1507 const unsigned TiedMax = 15; 1508 1509 /// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other. 1510 /// 1511 /// Use and def operands can be tied together, indicated by a non-zero TiedTo 1512 /// field. TiedTo can have these values: 1513 /// 1514 /// 0: Operand is not tied to anything. 1515 /// 1 to TiedMax-1: Tied to getOperand(TiedTo-1). 1516 /// TiedMax: Tied to an operand >= TiedMax-1. 1517 /// 1518 /// The tied def must be one of the first TiedMax operands on a normal 1519 /// instruction. INLINEASM instructions allow more tied defs. 
1520 /// 1521 void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) { 1522 MachineOperand &DefMO = getOperand(DefIdx); 1523 MachineOperand &UseMO = getOperand(UseIdx); 1524 assert(DefMO.isDef() && "DefIdx must be a def operand"); 1525 assert(UseMO.isUse() && "UseIdx must be a use operand"); 1526 assert(!DefMO.isTied() && "Def is already tied to another use"); 1527 assert(!UseMO.isTied() && "Use is already tied to another def"); 1528 1529 if (DefIdx < TiedMax) 1530 UseMO.TiedTo = DefIdx + 1; 1531 else { 1532 // Inline asm can use the group descriptors to find tied operands, but on 1533 // normal instruction, the tied def must be within the first TiedMax 1534 // operands. 1535 assert(isInlineAsm() && "DefIdx out of range"); 1536 UseMO.TiedTo = TiedMax; 1537 } 1538 1539 // UseIdx can be out of range, we'll search for it in findTiedOperandIdx(). 1540 DefMO.TiedTo = std::min(UseIdx + 1, TiedMax); 1541 } 1542 1543 /// Given the index of a tied register operand, find the operand it is tied to. 1544 /// Defs are tied to uses and vice versa. Returns the index of the tied operand 1545 /// which must exist. 1546 unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const { 1547 const MachineOperand &MO = getOperand(OpIdx); 1548 assert(MO.isTied() && "Operand isn't tied"); 1549 1550 // Normally TiedTo is in range. 1551 if (MO.TiedTo < TiedMax) 1552 return MO.TiedTo - 1; 1553 1554 // Uses on normal instructions can be out of range. 1555 if (!isInlineAsm()) { 1556 // Normal tied defs must be in the 0..TiedMax-1 range. 1557 if (MO.isUse()) 1558 return TiedMax - 1; 1559 // MO is a def. Search for the tied use. 1560 for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) { 1561 const MachineOperand &UseMO = getOperand(i); 1562 if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1) 1563 return i; 1564 } 1565 llvm_unreachable("Can't find tied use"); 1566 } 1567 1568 // Now deal with inline asm by parsing the operand group descriptor flags. 
1569 // Find the beginning of each operand group. 1570 SmallVector<unsigned, 8> GroupIdx; 1571 unsigned OpIdxGroup = ~0u; 1572 unsigned NumOps; 1573 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e; 1574 i += NumOps) { 1575 const MachineOperand &FlagMO = getOperand(i); 1576 assert(FlagMO.isImm() && "Invalid tied operand on inline asm"); 1577 unsigned CurGroup = GroupIdx.size(); 1578 GroupIdx.push_back(i); 1579 NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm()); 1580 // OpIdx belongs to this operand group. 1581 if (OpIdx > i && OpIdx < i + NumOps) 1582 OpIdxGroup = CurGroup; 1583 unsigned TiedGroup; 1584 if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup)) 1585 continue; 1586 // Operands in this group are tied to operands in TiedGroup which must be 1587 // earlier. Find the number of operands between the two groups. 1588 unsigned Delta = i - GroupIdx[TiedGroup]; 1589 1590 // OpIdx is a use tied to TiedGroup. 1591 if (OpIdxGroup == CurGroup) 1592 return OpIdx - Delta; 1593 1594 // OpIdx is a def tied to this use group. 1595 if (OpIdxGroup == TiedGroup) 1596 return OpIdx + Delta; 1597 } 1598 llvm_unreachable("Invalid tied operand on inline asm"); 1599 } 1600 1601 /// clearKillInfo - Clears kill flags on all operands. 
1602 /// 1603 void MachineInstr::clearKillInfo() { 1604 for (MachineOperand &MO : operands()) { 1605 if (MO.isReg() && MO.isUse()) 1606 MO.setIsKill(false); 1607 } 1608 } 1609 1610 void MachineInstr::substituteRegister(unsigned FromReg, 1611 unsigned ToReg, 1612 unsigned SubIdx, 1613 const TargetRegisterInfo &RegInfo) { 1614 if (TargetRegisterInfo::isPhysicalRegister(ToReg)) { 1615 if (SubIdx) 1616 ToReg = RegInfo.getSubReg(ToReg, SubIdx); 1617 for (MachineOperand &MO : operands()) { 1618 if (!MO.isReg() || MO.getReg() != FromReg) 1619 continue; 1620 MO.substPhysReg(ToReg, RegInfo); 1621 } 1622 } else { 1623 for (MachineOperand &MO : operands()) { 1624 if (!MO.isReg() || MO.getReg() != FromReg) 1625 continue; 1626 MO.substVirtReg(ToReg, SubIdx, RegInfo); 1627 } 1628 } 1629 } 1630 1631 /// isSafeToMove - Return true if it is safe to move this instruction. If 1632 /// SawStore is set to true, it means that there is a store (or call) between 1633 /// the instruction's location and its intended destination. 1634 bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const { 1635 // Ignore stuff that we obviously can't move. 1636 // 1637 // Treat volatile loads as stores. This is not strictly necessary for 1638 // volatiles, but it is required for atomic loads. It is not allowed to move 1639 // a load across an atomic load with Ordering > Monotonic. 1640 if (mayStore() || isCall() || 1641 (mayLoad() && hasOrderedMemoryRef())) { 1642 SawStore = true; 1643 return false; 1644 } 1645 1646 if (isPosition() || isDebugValue() || isTerminator() || 1647 hasUnmodeledSideEffects()) 1648 return false; 1649 1650 // See if this instruction does a load. If so, we have to guarantee that the 1651 // loaded value doesn't change between the load and the its intended 1652 // destination. The check for isInvariantLoad gives the targe the chance to 1653 // classify the load as always returning a constant, e.g. a constant pool 1654 // load. 
1655 if (mayLoad() && !isDereferenceableInvariantLoad(AA)) 1656 // Otherwise, this is a real load. If there is a store between the load and 1657 // end of block, we can't move it. 1658 return !SawStore; 1659 1660 return true; 1661 } 1662 1663 bool MachineInstr::mayAlias(AliasAnalysis *AA, MachineInstr &Other, 1664 bool UseTBAA) { 1665 const MachineFunction *MF = getParent()->getParent(); 1666 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); 1667 1668 // If neither instruction stores to memory, they can't alias in any 1669 // meaningful way, even if they read from the same address. 1670 if (!mayStore() && !Other.mayStore()) 1671 return false; 1672 1673 // Let the target decide if memory accesses cannot possibly overlap. 1674 if (TII->areMemAccessesTriviallyDisjoint(*this, Other, AA)) 1675 return false; 1676 1677 if (!AA) 1678 return true; 1679 1680 // FIXME: Need to handle multiple memory operands to support all targets. 1681 if (!hasOneMemOperand() || !Other.hasOneMemOperand()) 1682 return true; 1683 1684 MachineMemOperand *MMOa = *memoperands_begin(); 1685 MachineMemOperand *MMOb = *Other.memoperands_begin(); 1686 1687 if (!MMOa->getValue() || !MMOb->getValue()) 1688 return true; 1689 1690 // The following interface to AA is fashioned after DAGCombiner::isAlias 1691 // and operates with MachineMemOperand offset with some important 1692 // assumptions: 1693 // - LLVM fundamentally assumes flat address spaces. 1694 // - MachineOperand offset can *only* result from legalization and 1695 // cannot affect queries other than the trivial case of overlap 1696 // checking. 1697 // - These offsets never wrap and never step outside 1698 // of allocated objects. 1699 // - There should never be any negative offsets here. 1700 // 1701 // FIXME: Modify API to hide this math from "user" 1702 // FIXME: Even before we go to AA we can reason locally about some 1703 // memory objects. 
It can save compile time, and possibly catch some 1704 // corner cases not currently covered. 1705 1706 assert((MMOa->getOffset() >= 0) && "Negative MachineMemOperand offset"); 1707 assert((MMOb->getOffset() >= 0) && "Negative MachineMemOperand offset"); 1708 1709 int64_t MinOffset = std::min(MMOa->getOffset(), MMOb->getOffset()); 1710 int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset; 1711 int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset; 1712 1713 AliasResult AAResult = 1714 AA->alias(MemoryLocation(MMOa->getValue(), Overlapa, 1715 UseTBAA ? MMOa->getAAInfo() : AAMDNodes()), 1716 MemoryLocation(MMOb->getValue(), Overlapb, 1717 UseTBAA ? MMOb->getAAInfo() : AAMDNodes())); 1718 1719 return (AAResult != NoAlias); 1720 } 1721 1722 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered 1723 /// or volatile memory reference, or if the information describing the memory 1724 /// reference is not available. Return false if it is known to have no ordered 1725 /// memory references. 1726 bool MachineInstr::hasOrderedMemoryRef() const { 1727 // An instruction known never to access memory won't have a volatile access. 1728 if (!mayStore() && 1729 !mayLoad() && 1730 !isCall() && 1731 !hasUnmodeledSideEffects()) 1732 return false; 1733 1734 // Otherwise, if the instruction has no memory reference information, 1735 // conservatively assume it wasn't preserved. 1736 if (memoperands_empty()) 1737 return true; 1738 1739 // Check if any of our memory operands are ordered. 1740 return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) { 1741 return !MMO->isUnordered(); 1742 }); 1743 } 1744 1745 /// isDereferenceableInvariantLoad - Return true if this instruction will never 1746 /// trap and is loading from a location whose value is invariant across a run of 1747 /// this function. 
1748 bool MachineInstr::isDereferenceableInvariantLoad(AliasAnalysis *AA) const { 1749 // If the instruction doesn't load at all, it isn't an invariant load. 1750 if (!mayLoad()) 1751 return false; 1752 1753 // If the instruction has lost its memoperands, conservatively assume that 1754 // it may not be an invariant load. 1755 if (memoperands_empty()) 1756 return false; 1757 1758 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo(); 1759 1760 for (MachineMemOperand *MMO : memoperands()) { 1761 if (MMO->isVolatile()) return false; 1762 if (MMO->isStore()) return false; 1763 if (MMO->isInvariant() && MMO->isDereferenceable()) 1764 continue; 1765 1766 // A load from a constant PseudoSourceValue is invariant. 1767 if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) 1768 if (PSV->isConstant(&MFI)) 1769 continue; 1770 1771 if (const Value *V = MMO->getValue()) { 1772 // If we have an AliasAnalysis, ask it whether the memory is constant. 1773 if (AA && 1774 AA->pointsToConstantMemory( 1775 MemoryLocation(V, MMO->getSize(), MMO->getAAInfo()))) 1776 continue; 1777 } 1778 1779 // Otherwise assume conservatively. 1780 return false; 1781 } 1782 1783 // Everything checks out. 1784 return true; 1785 } 1786 1787 /// isConstantValuePHI - If the specified instruction is a PHI that always 1788 /// merges together the same virtual register, return the register, otherwise 1789 /// return 0. 
1790 unsigned MachineInstr::isConstantValuePHI() const { 1791 if (!isPHI()) 1792 return 0; 1793 assert(getNumOperands() >= 3 && 1794 "It's illegal to have a PHI without source operands"); 1795 1796 unsigned Reg = getOperand(1).getReg(); 1797 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2) 1798 if (getOperand(i).getReg() != Reg) 1799 return 0; 1800 return Reg; 1801 } 1802 1803 bool MachineInstr::hasUnmodeledSideEffects() const { 1804 if (hasProperty(MCID::UnmodeledSideEffects)) 1805 return true; 1806 if (isInlineAsm()) { 1807 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); 1808 if (ExtraInfo & InlineAsm::Extra_HasSideEffects) 1809 return true; 1810 } 1811 1812 return false; 1813 } 1814 1815 bool MachineInstr::isLoadFoldBarrier() const { 1816 return mayStore() || isCall() || hasUnmodeledSideEffects(); 1817 } 1818 1819 /// allDefsAreDead - Return true if all the defs of this instruction are dead. 1820 /// 1821 bool MachineInstr::allDefsAreDead() const { 1822 for (const MachineOperand &MO : operands()) { 1823 if (!MO.isReg() || MO.isUse()) 1824 continue; 1825 if (!MO.isDead()) 1826 return false; 1827 } 1828 return true; 1829 } 1830 1831 /// copyImplicitOps - Copy implicit register operands from specified 1832 /// instruction to this instruction. 
void MachineInstr::copyImplicitOps(MachineFunction &MF,
                                   const MachineInstr &MI) {
  // Operands beyond the MCInstrDesc's declared count are the implicit ones;
  // copy those (and any regmask operands) onto this instruction.
  for (unsigned i = MI.getDesc().getNumOperands(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      addOperand(MF, MO);
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger/debug-build helper: print this instruction to dbgs().
LLVM_DUMP_METHOD void MachineInstr::dump() const {
  dbgs() << " ";
  print(dbgs());
}
#endif

// Convenience overload: build a ModuleSlotTracker for the enclosing Module
// (when reachable via parent MBB/MF) and delegate to the MST-based printer.
void MachineInstr::print(raw_ostream &OS, bool SkipOpers, bool SkipDebugLoc,
                         const TargetInstrInfo *TII) const {
  const Module *M = nullptr;
  if (const MachineBasicBlock *MBB = getParent())
    if (const MachineFunction *MF = MBB->getParent())
      M = MF->getFunction()->getParent();

  ModuleSlotTracker MST(M);
  print(OS, MST, SkipOpers, SkipDebugLoc, TII);
}

// Main textual printer for a MachineInstr. Output order: explicit defs,
// "=", opcode name, remaining operands (with inline-asm pretty printing),
// then flags, memoperands, virtual-register classes, and debug location.
void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
                         bool SkipOpers, bool SkipDebugLoc,
                         const TargetInstrInfo *TII) const {
  // We can be a bit tidier if we know the MachineFunction.
  const MachineFunction *MF = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const TargetIntrinsicInfo *IntrinsicInfo = nullptr;

  if (const MachineBasicBlock *MBB = getParent()) {
    MF = MBB->getParent();
    if (MF) {
      MRI = &MF->getRegInfo();
      TRI = MF->getSubtarget().getRegisterInfo();
      if (!TII)
        TII = MF->getSubtarget().getInstrInfo();
      IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
    }
  }

  // Save a list of virtual registers.
  SmallVector<unsigned, 8> VirtRegs;

  // Print explicitly defined operands on the left of an assignment syntax.
  unsigned StartOp = 0, e = getNumOperands();
  for (; StartOp < e && getOperand(StartOp).isReg() &&
         getOperand(StartOp).isDef() &&
         !getOperand(StartOp).isImplicit();
       ++StartOp) {
    if (StartOp != 0) OS << ", ";
    getOperand(StartOp).print(OS, MST, TRI, IntrinsicInfo);
    unsigned Reg = getOperand(StartOp).getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      VirtRegs.push_back(Reg);
      // Generic vregs carry an LLT type; print it in parens when present.
      LLT Ty = MRI ? MRI->getType(Reg) : LLT{};
      if (Ty.isValid())
        OS << '(' << Ty << ')';
    }
  }

  if (StartOp != 0)
    OS << " = ";

  // Print the opcode name.
  if (TII)
    OS << TII->getName(getOpcode());
  else
    OS << "UNKNOWN";

  if (SkipOpers)
    return;

  // Print the rest of the operands.
  bool FirstOp = true;
  unsigned AsmDescOp = ~0u;
  unsigned AsmOpCount = 0;

  if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
    // Print asm string.
    OS << " ";
    getOperand(InlineAsm::MIOp_AsmString).print(OS, MST, TRI);

    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      OS << " [sideeffect]";
    if (ExtraInfo & InlineAsm::Extra_MayLoad)
      OS << " [mayload]";
    if (ExtraInfo & InlineAsm::Extra_MayStore)
      OS << " [maystore]";
    if (ExtraInfo & InlineAsm::Extra_IsConvergent)
      OS << " [isconvergent]";
    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
      OS << " [alignstack]";
    if (getInlineAsmDialect() == InlineAsm::AD_ATT)
      OS << " [attdialect]";
    if (getInlineAsmDialect() == InlineAsm::AD_Intel)
      OS << " [inteldialect]";

    // Operand descriptors start right after the fixed inline-asm operands.
    StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
    FirstOp = false;
  }

  for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);

    if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      VirtRegs.push_back(MO.getReg());

    if (FirstOp) FirstOp = false; else OS << ",";
    OS << " ";
    if (i < getDesc().NumOperands) {
      const MCOperandInfo &MCOI = getDesc().OpInfo[i];
      if (MCOI.isPredicate())
        OS << "pred:";
      if (MCOI.isOptionalDef())
        OS << "opt:";
    }
    if (isDebugValue() && MO.isMetadata()) {
      // Pretty print DBG_VALUE instructions.
      auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
      if (DIV && !DIV->getName().empty())
        OS << "!\"" << DIV->getName() << '\"';
      else
        MO.print(OS, MST, TRI);
    } else if (TRI && (isInsertSubreg() || isRegSequence() ||
                       (isSubregToReg() && i == 3)) && MO.isImm()) {
      // These opcodes take subregister-index immediates; print them by name.
      OS << TRI->getSubRegIndexName(MO.getImm());
    } else if (i == AsmDescOp && MO.isImm()) {
      // Pretty print the inline asm operand descriptor.
      OS << '$' << AsmOpCount++;
      unsigned Flag = MO.getImm();
      switch (InlineAsm::getKind(Flag)) {
      case InlineAsm::Kind_RegUse:             OS << ":[reguse"; break;
      case InlineAsm::Kind_RegDef:             OS << ":[regdef"; break;
      case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
      case InlineAsm::Kind_Clobber:            OS << ":[clobber"; break;
      case InlineAsm::Kind_Imm:                OS << ":[imm"; break;
      case InlineAsm::Kind_Mem:                OS << ":[mem"; break;
      default: OS << ":[??" << InlineAsm::getKind(Flag); break;
      }

      // Register-class constraint, if the descriptor encodes one.
      unsigned RCID = 0;
      if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
          InlineAsm::hasRegClassConstraint(Flag, RCID)) {
        if (TRI) {
          OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
        } else
          OS << ":RC" << RCID;
      }

      // Memory constraint code, for memory-kind descriptors.
      if (InlineAsm::isMemKind(Flag)) {
        unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
        switch (MCID) {
        case InlineAsm::Constraint_es: OS << ":es"; break;
        case InlineAsm::Constraint_i:  OS << ":i"; break;
        case InlineAsm::Constraint_m:  OS << ":m"; break;
        case InlineAsm::Constraint_o:  OS << ":o"; break;
        case InlineAsm::Constraint_v:  OS << ":v"; break;
        case InlineAsm::Constraint_Q:  OS << ":Q"; break;
        case InlineAsm::Constraint_R:  OS << ":R"; break;
        case InlineAsm::Constraint_S:  OS << ":S"; break;
        case InlineAsm::Constraint_T:  OS << ":T"; break;
        case InlineAsm::Constraint_Um: OS << ":Um"; break;
        case InlineAsm::Constraint_Un: OS << ":Un"; break;
        case InlineAsm::Constraint_Uq: OS << ":Uq"; break;
        case InlineAsm::Constraint_Us: OS << ":Us"; break;
        case InlineAsm::Constraint_Ut: OS << ":Ut"; break;
        case InlineAsm::Constraint_Uv: OS << ":Uv"; break;
        case InlineAsm::Constraint_Uy: OS << ":Uy"; break;
        case InlineAsm::Constraint_X:  OS << ":X"; break;
        case InlineAsm::Constraint_Z:  OS << ":Z"; break;
        case InlineAsm::Constraint_ZC: OS << ":ZC"; break;
        case InlineAsm::Constraint_Zy: OS << ":Zy"; break;
        default: OS << ":?"; break;
        }
      }

      unsigned TiedTo = 0;
      if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
        OS << " tiedto:$" << TiedTo;

      OS << ']';

      // Compute the index of the next operand descriptor.
      AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
    } else
      MO.print(OS, MST, TRI);
  }

  // Trailing annotations are prefixed by a single ';' emitted lazily.
  bool HaveSemi = false;
  const unsigned PrintableFlags = FrameSetup | FrameDestroy;
  if (Flags & PrintableFlags) {
    if (!HaveSemi) {
      OS << ";";
      HaveSemi = true;
    }
    OS << " flags: ";

    if (Flags & FrameSetup)
      OS << "FrameSetup";

    if (Flags & FrameDestroy)
      OS << "FrameDestroy";
  }

  if (!memoperands_empty()) {
    if (!HaveSemi) {
      OS << ";";
      HaveSemi = true;
    }

    OS << " mem:";
    // NOTE: this 'e' shadows the operand-count 'e' above.
    for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
         i != e; ++i) {
      (*i)->print(OS, MST);
      if (std::next(i) != e)
        OS << " ";
    }
  }

  // Print the regclass of any virtual registers encountered.
  if (MRI && !VirtRegs.empty()) {
    if (!HaveSemi) {
      OS << ";";
      HaveSemi = true;
    }
    for (unsigned i = 0; i != VirtRegs.size(); ++i) {
      const RegClassOrRegBank &RC = MRI->getRegClassOrRegBank(VirtRegs[i]);
      if (!RC)
        continue;
      // Generic virtual registers do not have register classes.
      if (RC.is<const RegisterBank *>())
        OS << " " << RC.get<const RegisterBank *>()->getName();
      else
        OS << " "
           << TRI->getRegClassName(RC.get<const TargetRegisterClass *>());
      OS << ':' << PrintReg(VirtRegs[i]);
      // Fold later registers with the same class/bank into this entry,
      // erasing them so they aren't printed again.
      for (unsigned j = i+1; j != VirtRegs.size();) {
        if (MRI->getRegClassOrRegBank(VirtRegs[j]) != RC) {
          ++j;
          continue;
        }
        if (VirtRegs[i] != VirtRegs[j])
          OS << "," << PrintReg(VirtRegs[j]);
        VirtRegs.erase(VirtRegs.begin()+j);
      }
    }
  }

  // Print debug location information.
  if (isDebugValue() && getOperand(e - 2).isMetadata()) {
    if (!HaveSemi)
      OS << ";";
    // DBG_VALUE keeps its variable metadata in the second-to-last operand.
    auto *DV = cast<DILocalVariable>(getOperand(e - 2).getMetadata());
    OS << " line no:" << DV->getLine();
    if (auto *InlinedAt = debugLoc->getInlinedAt()) {
      DebugLoc InlinedAtDL(InlinedAt);
      if (InlinedAtDL && MF) {
        OS << " inlined @[ ";
        InlinedAtDL.print(OS);
        OS << " ]";
      }
    }
    if (isIndirectDebugValue())
      OS << " indirect";
  } else if (SkipDebugLoc) {
    return;
  } else if (debugLoc && MF) {
    if (!HaveSemi)
      OS << ";";
    OS << " dbg:";
    debugLoc.print(OS);
  }

  OS << '\n';
}

// Mark IncomingReg as killed by this instruction, returning true on success.
// If no operand for it (or an alias) exists and AddIfNotFound is set, an
// implicit kill use is appended.
bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
                                     const TargetRegisterInfo *RegInfo,
                                     bool AddIfNotFound) {
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of sub-register kill operands made redundant by this kill.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;

    // DEBUG_VALUE nodes do not contribute to code generation and should
    // always be ignored. Failure to do so may result in trying to modify
    // KILL flags on DEBUG_VALUE nodes.
    if (MO.isDebug())
      continue;

    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;

    if (Reg == IncomingReg) {
      if (!Found) {
        if (MO.isKill())
          // The register is already marked kill.
          return true;
        if (isPhysReg && isRegTiedToDefOperand(i))
          // Two-address uses of physregs must not be marked kill.
          return true;
        MO.setIsKill();
        Found = true;
      }
    } else if (hasAliases && MO.isKill() &&
               TargetRegisterInfo::isPhysicalRegister(Reg)) {
      // A super-register kill already exists.
      if (RegInfo->isSuperRegister(IncomingReg, Reg))
        return true;
      if (RegInfo->isSubRegister(IncomingReg, Reg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded kill operands.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      getOperand(OpIdx).setIsKill(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is killed. Add a
  // new implicit operand if required.
  if (!Found && AddIfNotFound) {
    addOperand(MachineOperand::CreateReg(IncomingReg,
                                         false /*IsDef*/,
                                         true  /*IsImp*/,
                                         true  /*IsKill*/));
    return true;
  }
  return Found;
}

// Clear kill flags on all uses of Reg (and, for physregs, overlapping regs).
void MachineInstr::clearRegisterKills(unsigned Reg,
                                      const TargetRegisterInfo *RegInfo) {
  // For virtual registers only exact matches count; drop RegInfo so the
  // overlap check below is skipped.
  if (!TargetRegisterInfo::isPhysicalRegister(Reg))
    RegInfo = nullptr;
  for (MachineOperand &MO : operands()) {
    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned OpReg = MO.getReg();
    if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
      MO.setIsKill(false);
  }
}

// Mark Reg dead on this instruction; mirrors addRegisterKilled but for defs.
bool MachineInstr::addRegisterDead(unsigned Reg,
                                   const TargetRegisterInfo *RegInfo,
                                   bool AddIfNotFound) {
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(Reg, RegInfo, false).isValid();
  bool Found = false;
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    if (!MOReg)
      continue;

    if (MOReg == Reg) {
      MO.setIsDead();
      Found = true;
    } else if (hasAliases && MO.isDead() &&
               TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      // There exists a super-register that's marked dead.
      if (RegInfo->isSuperRegister(Reg, MOReg))
        return true;
      if (RegInfo->isSubRegister(Reg, MOReg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded dead operands.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      getOperand(OpIdx).setIsDead(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is dead. Add a
  // new implicit operand if required.
  if (Found || !AddIfNotFound)
    return Found;

  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/,
                                       false /*IsKill*/,
                                       true  /*IsDead*/));
  return true;
}

// Remove the dead flag from every def of Reg on this instruction.
void MachineInstr::clearRegisterDeads(unsigned Reg) {
  for (MachineOperand &MO : operands()) {
    if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
      continue;
    MO.setIsDead(false);
  }
}

// Set/clear the undef flag on subregister defs of Reg (full defs, i.e.
// SubReg == 0, are intentionally skipped).
void MachineInstr::setRegisterDefReadUndef(unsigned Reg, bool IsUndef) {
  for (MachineOperand &MO : operands()) {
    if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
      continue;
    MO.setIsUndef(IsUndef);
  }
}

// Ensure this instruction has a def of Reg, appending an implicit def if one
// is not already present.
void MachineInstr::addRegisterDefined(unsigned Reg,
                                      const TargetRegisterInfo *RegInfo) {
  if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
    MachineOperand *MO = findRegisterDefOperand(Reg, false, RegInfo);
    if (MO)
      return;
  } else {
    // For virtual registers, only a full (SubReg == 0) def counts.
    for (const MachineOperand &MO : operands()) {
      if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
          MO.getSubReg() == 0)
        return;
    }
  }
  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/));
}

// Mark every physreg def not overlapping UsedRegs as dead; with a regmask
// present, also add explicit defs for the used registers.
void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
                                         const TargetRegisterInfo &TRI) {
  bool HasRegMask = false;
  for (MachineOperand &MO : operands()) {
    if (MO.isRegMask()) {
      HasRegMask = true;
      continue;
    }
    if (!MO.isReg() || !MO.isDef()) continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
    // If there are no uses, including partial uses, the def is dead.
    if (llvm::none_of(UsedRegs,
                      [&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
      MO.setIsDead();
  }

  // This is a call with a register mask operand.
  // Mask clobbers are always dead, so add defs for the non-dead defines.
  if (HasRegMask)
    for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
         I != E; ++I)
      addRegisterDefined(*I, &TRI);
}

// Hash an instruction for CSE-style maps: opcode plus all operands except
// virtual-register defs (so equivalent computations into different vregs
// hash alike).
unsigned
MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
  // Build up a buffer of hash code components.
  SmallVector<size_t, 8> HashComponents;
  HashComponents.reserve(MI->getNumOperands() + 1);
  HashComponents.push_back(MI->getOpcode());
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isReg() && MO.isDef() &&
        TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      continue;  // Skip virtual register defs.

    HashComponents.push_back(hash_value(MO));
  }
  return hash_combine_range(HashComponents.begin(), HashComponents.end());
}

// Report an error tied to this instruction's inline-asm source location (the
// "srcloc" cookie), falling back to a fatal error when no context exists.
void MachineInstr::emitError(StringRef Msg) const {
  // Find the source location cookie.
  unsigned LocCookie = 0;
  const MDNode *LocMD = nullptr;
  // Scan operands backwards: the location metadata is appended last.
  for (unsigned i = getNumOperands(); i != 0; --i) {
    if (getOperand(i-1).isMetadata() &&
        (LocMD = getOperand(i-1).getMetadata()) &&
        LocMD->getNumOperands() != 0) {
      if (const ConstantInt *CI =
              mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
        LocCookie = CI->getZExtValue();
        break;
      }
    }
  }

  if (const MachineBasicBlock *MBB = getParent())
    if (const MachineFunction *MF = MBB->getParent())
      return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
  report_fatal_error(Msg);
}

// Build an unattached DBG_VALUE. Indirect locations get (reg, offset);
// direct ones encode a second dummy reg operand instead of an offset.
MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
                                  const MCInstrDesc &MCID, bool IsIndirect,
                                  unsigned Reg, unsigned Offset,
                                  const MDNode *Variable, const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  if (IsIndirect)
    return BuildMI(MF, DL, MCID)
        .addReg(Reg, RegState::Debug)
        .addImm(Offset)
        .addMetadata(Variable)
        .addMetadata(Expr);
  else {
    assert(Offset == 0 && "A direct address cannot have an offset.");
    return BuildMI(MF, DL, MCID)
        .addReg(Reg, RegState::Debug)
        .addReg(0U, RegState::Debug)
        .addMetadata(Variable)
        .addMetadata(Expr);
  }
}

// As above, but also insert the new DBG_VALUE into BB before I.
MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
                                  MachineBasicBlock::iterator I,
                                  const DebugLoc &DL, const MCInstrDesc &MCID,
                                  bool IsIndirect, unsigned Reg,
                                  unsigned Offset, const MDNode *Variable,
                                  const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  MachineFunction &MF = *BB.getParent();
  MachineInstr *MI =
      BuildMI(MF, DL, MCID, IsIndirect, Reg, Offset, Variable, Expr);
  BB.insert(I, MI);
  return MachineInstrBuilder(MF, MI);
}

// Clone Orig as a DBG_VALUE describing the value spilled to FrameIndex.
MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
                                          MachineBasicBlock::iterator I,
                                          const MachineInstr &Orig,
                                          int FrameIndex) {
  const MDNode *Var = Orig.getDebugVariable();
  const auto *Expr = cast_or_null<DIExpression>(Orig.getDebugExpression());
  bool IsIndirect = Orig.isIndirectDebugValue();
  uint64_t Offset = IsIndirect ? Orig.getOperand(1).getImm() : 0;
  DebugLoc DL = Orig.getDebugLoc();
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  // If the DBG_VALUE already was a memory location, add an extra
  // DW_OP_deref. Otherwise just turning this from a register into a
  // memory/indirect location is sufficient.
  if (IsIndirect)
    Expr = DIExpression::prepend(Expr, DIExpression::WithDeref);
  return BuildMI(BB, I, DL, Orig.getDesc())
      .addFrameIndex(FrameIndex)
      .addImm(Offset)
      .addMetadata(Var)
      .addMetadata(Expr);
}