//==- CodeGen/TargetRegisterInfo.h - Target Register Information -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes an abstract interface used to get information about a
// target machine's register file. This information is used for a variety of
// purposes, especially register allocation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETREGISTERINFO_H
#define LLVM_CODEGEN_TARGETREGISTERINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Printable.h"
#include <cassert>
#include <cstdint>
#include <functional>

namespace llvm {

class BitVector;
class DIExpression;
class LiveRegMatrix;
class MachineFunction;
class MachineInstr;
class RegScavenger;
class VirtRegMap;
class LiveIntervals;
class LiveInterval;

class TargetRegisterClass {
public:
  using iterator = const MCPhysReg *;
  using const_iterator = const MCPhysReg *;
  using sc_iterator = const TargetRegisterClass* const *;

  // Instance variables filled by tablegen, do not use!
  const MCRegisterClass *MC;
  const uint32_t *SubClassMask;
  const uint16_t *SuperRegIndices;
  const LaneBitmask LaneMask;
  /// Classes with a higher priority value are assigned first by register
  /// allocators using a greedy heuristic. The value is in the range [0,63].
  const uint8_t AllocationPriority;
  /// Whether the class supports two (or more) disjunct subregister indices.
  const bool HasDisjunctSubRegs;
  /// Whether a combination of subregisters can cover every register in the
  /// class. See also the CoveredBySubRegs description in Target.td.
  const bool CoveredBySubRegs;
  const sc_iterator SuperClasses;
  ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);

  /// Return the register class ID number.
  unsigned getID() const { return MC->getID(); }

  /// begin/end - Return all of the registers in this class.
  iterator begin() const { return MC->begin(); }
  iterator end() const { return MC->end(); }

  /// Return the number of registers in this class.
  unsigned getNumRegs() const { return MC->getNumRegs(); }

  iterator_range<SmallVectorImpl<MCPhysReg>::const_iterator>
  getRegisters() const {
    return make_range(MC->begin(), MC->end());
  }

  /// Return the specified register in the class.
  MCRegister getRegister(unsigned i) const {
    return MC->getRegister(i);
  }
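
  // Illustrative sketch (not part of the API): basic queries on a register
  // class. `RC` is assumed to be a TargetRegisterClass pointer obtained from
  // the target, e.g. via TargetRegisterInfo::getRegClass().
  //
  //   for (MCPhysReg PhysReg : RC->getRegisters())
  //     dbgs() << "class " << RC->getID() << " contains " << PhysReg << '\n';
  //   assert(RC->getNumRegs() ==
  //          (unsigned)std::distance(RC->begin(), RC->end()));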
  /// Return true if the specified register is included in this register
  /// class. This does not include virtual registers.
  bool contains(Register Reg) const {
    /// FIXME: Historically this function has returned false when given vregs
    /// but it should probably only receive physical registers.
    if (!Reg.isPhysical())
      return false;
    return MC->contains(Reg.asMCReg());
  }

  /// Return true if both registers are in this class.
  bool contains(Register Reg1, Register Reg2) const {
    /// FIXME: Historically this function has returned false when given vregs
    /// but it should probably only receive physical registers.
    if (!Reg1.isPhysical() || !Reg2.isPhysical())
      return false;
    return MC->contains(Reg1.asMCReg(), Reg2.asMCReg());
  }

  /// Return the cost of copying a value between two registers in this class.
  /// A negative number means the register class is very expensive
  /// to copy, e.g. status flag register classes.
  int getCopyCost() const { return MC->getCopyCost(); }

  /// Return true if this register class may be used to create virtual
  /// registers.
  bool isAllocatable() const { return MC->isAllocatable(); }

  /// Return true if the specified TargetRegisterClass
  /// is a proper sub-class of this TargetRegisterClass.
  bool hasSubClass(const TargetRegisterClass *RC) const {
    return RC != this && hasSubClassEq(RC);
  }

  /// Returns true if RC is a sub-class of or equal to this class.
  bool hasSubClassEq(const TargetRegisterClass *RC) const {
    unsigned ID = RC->getID();
    return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
  }

  /// Return true if the specified TargetRegisterClass is a
  /// proper super-class of this TargetRegisterClass.
  bool hasSuperClass(const TargetRegisterClass *RC) const {
    return RC->hasSubClass(this);
  }

  /// Returns true if RC is a super-class of or equal to this class.
  bool hasSuperClassEq(const TargetRegisterClass *RC) const {
    return RC->hasSubClassEq(this);
  }

  /// Returns a bit vector of subclasses, including this one.
  /// The vector is indexed by class IDs.
  ///
  /// To use it, consider the returned array as a chunk of memory that
  /// contains an array of bits of size NumRegClasses. Each 32-bit chunk
  /// contains a bitset of the IDs of the subclasses in big-endian style.
  /// I.e., the representation of the memory from left to right at the
  /// bit level looks like:
  ///   [31 30 ... 1 0] [63 62 ... 33 32] ...
  ///   [XXX NumRegClasses NumRegClasses - 1 ... ]
  /// where each number represents a class ID and XXX are bits that
  /// should be ignored.
  ///
  /// See the implementation of hasSubClassEq for an example of how it
  /// can be used.
  const uint32_t *getSubClassMask() const {
    return SubClassMask;
  }

  /// Returns a 0-terminated list of sub-register indices that project some
  /// super-register class into this register class. The list has an entry for
  /// each Idx such that:
  ///
  ///   There exists SuperRC where:
  ///     For all Reg in SuperRC:
  ///       this->contains(Reg:Idx)
  const uint16_t *getSuperRegIndices() const {
    return SuperRegIndices;
  }
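
  // Illustrative sketch (assumes `A` and `B` are register classes of the same
  // target): the sub-class relationship can be tested either through
  // hasSubClassEq() or directly against the bit vector described above.
  //
  //   const uint32_t *Mask = A->getSubClassMask();
  //   unsigned ID = B->getID();
  //   bool BIsSubClassOfA = (Mask[ID / 32] >> (ID % 32)) & 1;
  //   assert(BIsSubClassOfA == A->hasSubClassEq(B));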
  /// Returns a NULL-terminated list of super-classes. The classes are ordered
  /// by ID, which is also a topological ordering from large to small classes.
  /// The list does NOT include the current class.
  sc_iterator getSuperClasses() const {
    return SuperClasses;
  }

  /// Return true if this TargetRegisterClass is a subset
  /// class of at least one other TargetRegisterClass.
  bool isASubClass() const {
    return SuperClasses[0] != nullptr;
  }

  /// Returns the preferred order for allocating registers from this register
  /// class in MF. The raw order comes directly from the .td file and may
  /// include reserved registers that are not allocatable.
  /// Register allocators should also make sure to allocate
  /// callee-saved registers only after all the volatiles are used. The
  /// RegisterClassInfo class provides filtered allocation orders with
  /// callee-saved registers moved to the end.
  ///
  /// The MachineFunction argument can be used to tune the allocatable
  /// registers based on the characteristics of the function, subtarget, or
  /// other criteria.
  ///
  /// By default, this method returns all registers in the class.
  ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
    return OrderFunc ? OrderFunc(MF) : makeArrayRef(begin(), getNumRegs());
  }

  /// Returns the combination of all lane masks of registers in this class.
  /// The lane masks of the registers are the combination of all lane masks
  /// of their subregisters. Returns 1 if there are no subregisters.
  LaneBitmask getLaneMask() const {
    return LaneMask;
  }
};

/// Extra information, not in MCRegisterDesc, about registers.
/// These are used by codegen, not by MC.
struct TargetRegisterInfoDesc {
  const uint8_t *CostPerUse; // Extra cost of instructions using register.
  unsigned NumCosts;         // Number of cost values associated with each register.
  const bool
      *InAllocatableClass;   // Register belongs to an allocatable regclass.
};

/// Each TargetRegisterClass has a per register weight, and weight
/// limit which must be less than the limits of its pressure sets.
struct RegClassWeight {
  unsigned RegWeight;
  unsigned WeightLimit;
};

/// TargetRegisterInfo base class - We assume that the target defines a static
/// array of TargetRegisterDesc objects that represent all of the machine
/// registers that the target has. As such, we simply have to track a pointer
/// to this array so that we can turn register number into a register
/// descriptor.
///
class TargetRegisterInfo : public MCRegisterInfo {
public:
  using regclass_iterator = const TargetRegisterClass * const *;
  using vt_iterator = const MVT::SimpleValueType *;
  struct RegClassInfo {
    unsigned RegSize, SpillSize, SpillAlignment;
    vt_iterator VTList;
  };

private:
  const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen
  const char *const *SubRegIndexNames;    // Names of subreg indexes.
  // Pointer to array of lane masks, one per sub-reg index.
  const LaneBitmask *SubRegIndexLaneMasks;

  regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses
  LaneBitmask CoveringLanes;
  const RegClassInfo *const RCInfos;
  unsigned HwMode;

protected:
  TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
                     regclass_iterator RCB,
                     regclass_iterator RCE,
                     const char *const *SRINames,
                     const LaneBitmask *SRILaneMasks,
                     LaneBitmask CoveringLanes,
                     const RegClassInfo *const RCIs,
                     unsigned Mode = 0);
  virtual ~TargetRegisterInfo();

public:
  // Register numbers can represent physical registers, virtual registers, and
  // sometimes stack slots. The unsigned values are divided into these ranges:
  //
  //   0           Not a register, can be used as a sentinel.
  //   [1;2^30)    Physical registers assigned by TableGen.
  //   [2^30;2^31) Stack slots. (Rarely used.)
  //   [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
  //
  // Further sentinels can be allocated from the small negative integers.
  // DenseMapInfo<unsigned> uses -1u and -2u.

  /// Return the size in bits of a register from class RC.
  unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).RegSize;
  }

  /// Return the size in bytes of the stack slot allocated to hold a spilled
  /// copy of a register from class RC.
  unsigned getSpillSize(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).SpillSize / 8;
  }

  /// Return the minimum required alignment in bytes for a spill slot for
  /// a register of this class.
  Align getSpillAlign(const TargetRegisterClass &RC) const {
    return Align(getRegClassInfo(RC).SpillAlignment / 8);
  }

  /// Return true if the given TargetRegisterClass has the ValueType T.
  bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const {
    for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I)
      if (MVT(*I) == T)
        return true;
    return false;
  }

  /// Return true if the given TargetRegisterClass is compatible with LLT T.
  bool isTypeLegalForClass(const TargetRegisterClass &RC, LLT T) const {
    for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I) {
      MVT VT(*I);
      if (VT == MVT::Untyped)
        return true;

      if (LLT(VT) == T)
        return true;
    }
    return false;
  }

  /// Loop over all of the value types that can be represented by values
  /// in the given register class.
  vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).VTList;
  }

  vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const {
    vt_iterator I = legalclasstypes_begin(RC);
    while (*I != MVT::Other)
      ++I;
    return I;
  }
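
  // Illustrative sketch (assumes `TRI` is a TargetRegisterInfo reference and
  // `RC` a register class of the current target): sizes and alignments are
  // stored in bits internally and returned in bytes by the spill queries.
  //
  //   unsigned Bits = TRI.getRegSizeInBits(RC);       // e.g. 32
  //   unsigned SlotBytes = TRI.getSpillSize(RC);      // e.g. 4
  //   Align SlotAlign = TRI.getSpillAlign(RC);        // e.g. Align(4)
  //   bool CanHoldI32 = TRI.isTypeLegalForClass(RC, MVT::i32);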
  /// Returns the Register Class of a physical register of the given type,
  /// picking the most specific sub register class of the right type that
  /// contains this physreg.
  const TargetRegisterClass *getMinimalPhysRegClass(MCRegister Reg,
                                                    MVT VT = MVT::Other) const;

  /// Returns the Register Class of a physical register of the given type,
  /// picking the most specific sub register class of the right type that
  /// contains this physreg. If there is no register class compatible with the
  /// given type, returns nullptr.
  const TargetRegisterClass *getMinimalPhysRegClassLLT(MCRegister Reg,
                                                       LLT Ty = LLT()) const;

  /// Return the maximal subclass of the given register class that is
  /// allocatable, or NULL.
  const TargetRegisterClass *
  getAllocatableClass(const TargetRegisterClass *RC) const;

  /// Returns a bitset indexed by register number indicating if a register is
  /// allocatable or not. If a register class is specified, returns the subset
  /// for the class.
  BitVector getAllocatableSet(const MachineFunction &MF,
                              const TargetRegisterClass *RC = nullptr) const;

  /// Get a list of cost values for all registers that correspond to the index
  /// returned by RegisterCostTableIndex.
  ArrayRef<uint8_t> getRegisterCosts(const MachineFunction &MF) const {
    unsigned Idx = getRegisterCostTableIndex(MF);
    unsigned NumRegs = getNumRegs();
    assert(Idx < InfoDesc->NumCosts && "CostPerUse index out of bounds");

    return makeArrayRef(&InfoDesc->CostPerUse[Idx * NumRegs], NumRegs);
  }

  /// Return true if the register is in the allocation of any register class.
  bool isInAllocatableClass(MCRegister RegNo) const {
    return InfoDesc->InAllocatableClass[RegNo];
  }

  /// Return the human-readable symbolic target-specific
  /// name for the specified SubRegIndex.
  const char *getSubRegIndexName(unsigned SubIdx) const {
    assert(SubIdx && SubIdx < getNumSubRegIndices() &&
           "This is not a subregister index");
    return SubRegIndexNames[SubIdx-1];
  }

  /// Return a bitmask representing the parts of a register that are covered by
  /// SubIdx \see LaneBitmask.
  ///
  /// SubIdx == 0 is allowed, it has the lane mask ~0u.
  LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const {
    assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
    return SubRegIndexLaneMasks[SubIdx];
  }

  /// Try to find one or more subregister indexes to cover \p LaneMask.
  ///
  /// If this is possible, returns true and appends the best matching set of
  /// indexes to \p Indexes. If this is not possible, returns false.
  bool getCoveringSubRegIndexes(const MachineRegisterInfo &MRI,
                                const TargetRegisterClass *RC,
                                LaneBitmask LaneMask,
                                SmallVectorImpl<unsigned> &Indexes) const;
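
  // Illustrative sketch (assumes `SubA` and `SubB` are sub-register indices of
  // the current target): two sub-registers of the same register overlap
  // exactly when their lane masks intersect.
  //
  //   LaneBitmask MaskA = TRI.getSubRegIndexLaneMask(SubA);
  //   LaneBitmask MaskB = TRI.getSubRegIndexLaneMask(SubB);
  //   bool Overlap = (MaskA & MaskB).any();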
  /// The lane masks returned by getSubRegIndexLaneMask() above can only be
  /// used to determine if sub-registers overlap - they can't be used to
  /// determine if a set of sub-registers completely covers another
  /// sub-register.
  ///
  /// The X86 general purpose registers have two lanes corresponding to the
  /// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have
  /// lane masks '3', but the sub_16bit sub-register doesn't fully cover the
  /// sub_32bit sub-register.
  ///
  /// On the other hand, the ARM NEON lanes fully cover their registers: The
  /// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes.
  /// This is related to the CoveredBySubRegs property on register definitions.
  ///
  /// This function returns a bit mask of lanes that completely cover their
  /// sub-registers. More precisely, given:
  ///
  ///   Covering = getCoveringLanes();
  ///   MaskA = getSubRegIndexLaneMask(SubA);
  ///   MaskB = getSubRegIndexLaneMask(SubB);
  ///
  /// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by
  /// SubB.
  LaneBitmask getCoveringLanes() const { return CoveringLanes; }

  /// Returns true if the two registers are equal or alias each other.
  /// The registers may be virtual registers.
  bool regsOverlap(Register regA, Register regB) const {
    if (regA == regB) return true;
    if (!regA.isPhysical() || !regB.isPhysical())
      return false;

    // Regunits are numerically ordered. Find a common unit.
    MCRegUnitIterator RUA(regA.asMCReg(), this);
    MCRegUnitIterator RUB(regB.asMCReg(), this);
    do {
      if (*RUA == *RUB) return true;
      if (*RUA < *RUB) ++RUA;
      else ++RUB;
    } while (RUA.isValid() && RUB.isValid());
    return false;
  }

  /// Returns true if Reg contains RegUnit.
  bool hasRegUnit(MCRegister Reg, Register RegUnit) const {
    for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units)
      if (Register(*Units) == RegUnit)
        return true;
    return false;
  }

  /// Returns the original SrcReg unless it is the target of a copy-like
  /// operation, in which case we chain backwards through all such operations
  /// to the ultimate source register. If a physical register is encountered,
  /// we stop the search.
  virtual Register lookThruCopyLike(Register SrcReg,
                                    const MachineRegisterInfo *MRI) const;

  /// Find the original SrcReg unless it is the target of a copy-like
  /// operation, in which case we chain backwards through all such operations
  /// to the ultimate source register. If a physical register is encountered,
  /// we stop the search.
  /// Return the original SrcReg if all the definitions in the chain only have
  /// one user and not a physical register.
  virtual Register
  lookThruSingleUseCopyChain(Register SrcReg,
                             const MachineRegisterInfo *MRI) const;

  /// Return a null-terminated list of all of the callee-saved registers on
  /// this target. The registers should be in the order of desired callee-save
  /// stack frame offset. The first register is closest to the incoming stack
  /// pointer if stack grows down, and vice versa.
  /// Notice: This function does not take into account disabled CSRs.
  /// In most cases you will want to use instead the function
  /// getCalleeSavedRegs that is implemented in MachineRegisterInfo.
  virtual const MCPhysReg*
  getCalleeSavedRegs(const MachineFunction *MF) const = 0;
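
  // Illustrative sketch (assumes `MF` is the current MachineFunction): the
  // callee-saved register list is null-terminated, so it is typically walked
  // like this.
  //
  //   for (const MCPhysReg *CSR = TRI.getCalleeSavedRegs(&MF); CSR && *CSR;
  //        ++CSR)
  //     dbgs() << printReg(*CSR, &TRI) << '\n';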
  /// Return a mask of call-preserved registers for the given calling
  /// convention on the current function. The mask should include all
  /// call-preserved aliases. This is used by the register allocator to
  /// determine which registers can be live across a call.
  ///
  /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
  /// A set bit indicates that all bits of the corresponding register are
  /// preserved across the function call. The bit mask is expected to be
  /// sub-register complete, i.e. if A is preserved, so are all its
  /// sub-registers.
  ///
  /// Bits are numbered from the LSB, so the bit for physical register Reg can
  /// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
  ///
  /// A NULL pointer means that no register mask will be used, and call
  /// instructions should use implicit-def operands to indicate call clobbered
  /// registers.
  virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                               CallingConv::ID) const {
    // The default mask clobbers everything. All targets should override.
    return nullptr;
  }

  /// Return a register mask for the registers preserved by the unwinder,
  /// or nullptr if no custom mask is needed.
  virtual const uint32_t *
  getCustomEHPadPreservedMask(const MachineFunction &MF) const {
    return nullptr;
  }

  /// Return a register mask that clobbers everything.
  virtual const uint32_t *getNoPreservedMask() const {
    llvm_unreachable("target does not provide no preserved mask");
  }

  /// Return a list of all of the registers which are clobbered "inside" a call
  /// to the given function. For example, these might be needed for PLT
  /// sequences of long-branch veneers.
  virtual ArrayRef<MCPhysReg>
  getIntraCallClobberedRegs(const MachineFunction *MF) const {
    return {};
  }

  /// Return true if all bits that are set in mask \p mask0 are also set in
  /// \p mask1.
  bool regmaskSubsetEqual(const uint32_t *mask0, const uint32_t *mask1) const;

  /// Return all the call-preserved register masks defined for this target.
  virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
  virtual ArrayRef<const char *> getRegMaskNames() const = 0;

  /// Returns a bitset indexed by physical register number indicating if a
  /// register is a special register that has particular uses and should be
  /// considered unavailable at all times, e.g. stack pointer, return address.
  /// A reserved register:
  ///   - is not allocatable
  ///   - is considered always live
  ///   - is ignored by liveness tracking
  /// It is often necessary to reserve the super registers of a reserved
  /// register as well, to avoid them getting allocated indirectly. You may use
  /// markSuperRegs() and checkAllSuperRegsMarked() in this case.
  virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;

  /// Returns false if we can't guarantee that Physreg, specified as an IR asm
  /// clobber constraint, will be preserved across the statement.
  virtual bool isAsmClobberable(const MachineFunction &MF,
                                MCRegister PhysReg) const {
    return true;
  }

  /// Returns true if PhysReg cannot be written to in inline asm statements.
  virtual bool isInlineAsmReadOnlyReg(const MachineFunction &MF,
                                      unsigned PhysReg) const {
    return false;
  }
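
  // Illustrative sketch (assumes `Mask` was returned by getCallPreservedMask()
  // and is non-null): testing whether physical register `Reg` is preserved
  // across a call follows the bit layout documented above.
  //
  //   bool Preserved = (Mask[Reg / 32] >> (Reg % 32)) & 1;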
  /// Returns true if PhysReg is unallocatable and constant throughout the
  /// function. Used by MachineRegisterInfo::isConstantPhysReg().
  virtual bool isConstantPhysReg(MCRegister PhysReg) const { return false; }

  /// Returns true if the register class is considered divergent.
  virtual bool isDivergentRegClass(const TargetRegisterClass *RC) const {
    return false;
  }

  /// Physical registers that may be modified within a function but are
  /// guaranteed to be restored before any uses. This is useful for targets
  /// that have call sequences where a GOT register may be updated by the
  /// caller prior to a call and is guaranteed to be restored (also by the
  /// caller) after the call.
  virtual bool isCallerPreservedPhysReg(MCRegister PhysReg,
                                        const MachineFunction &MF) const {
    return false;
  }

  /// This is a wrapper around getCallPreservedMask().
  /// Return true if the register is preserved after the call.
  virtual bool isCalleeSavedPhysReg(MCRegister PhysReg,
                                    const MachineFunction &MF) const;

  /// Prior to adding the live-out mask to a stackmap or patchpoint
  /// instruction, provide the target the opportunity to adjust it (mainly to
  /// remove pseudo-registers that should be ignored).
  virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const {}

  /// Return a super-register of the specified register
  /// Reg so its sub-register of index SubIdx is Reg.
  MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx,
                                 const TargetRegisterClass *RC) const {
    return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
  }

  /// Return a subclass of the specified register
  /// class A so that each register in it has a sub-register of the
  /// specified sub-register index which is in the specified register class B.
  ///
  /// TableGen will synthesize missing A sub-classes.
  virtual const TargetRegisterClass *
  getMatchingSuperRegClass(const TargetRegisterClass *A,
                           const TargetRegisterClass *B, unsigned Idx) const;

  // For a copy-like instruction that defines a register of class DefRC with
  // subreg index DefSubReg, reading from another source with class SrcRC and
  // subregister SrcSubReg, return true if this is a preferable copy
  // instruction or an earlier use should be used.
  virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                    unsigned DefSubReg,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SrcSubReg) const;
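
  // Illustrative sketch (the concrete names are an X86 example, assuming the
  // usual X86 register and sub-register index definitions): asking for the
  // GR32 super-register whose sub_8bit sub-register is AL yields EAX.
  //
  //   MCRegister Super =
  //       TRI.getMatchingSuperReg(X86::AL, X86::sub_8bit, &X86::GR32RegClass);
  //   // Super == X86::EAX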
  /// Returns the largest legal sub-class of RC that
  /// supports the sub-register index Idx.
  /// If no such sub-class exists, return NULL.
  /// If all registers in RC already have an Idx sub-register, return RC.
  ///
  /// TableGen generates a version of this function that is good enough in most
  /// cases. Targets can override if they have constraints that TableGen
  /// doesn't understand. For example, the x86 sub_8bit sub-register index is
  /// supported by the full GR32 register class in 64-bit mode, but only by the
  /// GR32_ABCD register class in 32-bit mode.
  ///
  /// TableGen will synthesize missing RC sub-classes.
  virtual const TargetRegisterClass *
  getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
    assert(Idx == 0 && "Target has no sub-registers");
    return RC;
  }

  /// Return the subregister index you get from composing
  /// two subregister indices.
  ///
  /// The special null sub-register index composes as the identity.
  ///
  /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
  /// returns c. Note that composeSubRegIndices does not tell you about illegal
  /// compositions: if R does not have a subreg a, or R:a does not have a
  /// subreg b, the result is unspecified.
  ///
  /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also
  /// has ssub_0:S0 - ssub_3:S3 subregs.
  /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
  unsigned composeSubRegIndices(unsigned a, unsigned b) const {
    if (!a) return b;
    if (!b) return a;
    return composeSubRegIndicesImpl(a, b);
  }

  /// Transforms a LaneMask computed for one subregister to the lanemask that
  /// would have been computed when composing the subsubregisters with IdxA
  /// first. @sa composeSubRegIndices()
  LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA,
                                         LaneBitmask Mask) const {
    if (!IdxA)
      return Mask;
    return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
  }

  /// Transform a lanemask given for a virtual register to the corresponding
  /// lanemask before using subregister with index \p IdxA.
  /// This is the reverse of composeSubRegIndexLaneMask(). Assuming Mask is a
  /// valid lane mask (no invalid bits set), the following holds:
  ///   X0 = composeSubRegIndexLaneMask(Idx, Mask)
  ///   X1 = reverseComposeSubRegIndexLaneMask(Idx, X0)
  ///   => X1 == Mask
  LaneBitmask reverseComposeSubRegIndexLaneMask(unsigned IdxA,
                                                LaneBitmask LaneMask) const {
    if (!IdxA)
      return LaneMask;
    return reverseComposeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
  }

  /// Debugging helper: dump register in human readable form to dbgs() stream.
  static void dumpReg(Register Reg, unsigned SubRegIndex = 0,
                      const TargetRegisterInfo *TRI = nullptr);
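
  // Illustrative sketch (the indices are the ARM example from the comment
  // above, assuming the usual ARM sub-register index definitions): composing
  // dsub_1 with ssub_0 yields ssub_2, and lane mask composition round-trips.
  //
  //   unsigned Idx = TRI.composeSubRegIndices(ARM::dsub_1, ARM::ssub_0);
  //   // Idx == ARM::ssub_2
  //   LaneBitmask M0 = TRI.composeSubRegIndexLaneMask(Idx, Mask);
  //   LaneBitmask M1 = TRI.reverseComposeSubRegIndexLaneMask(Idx, M0);
  //   // M1 == Mask, for any valid Mask.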
protected:
  /// Overridden by TableGen in targets that have sub-registers.
  virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
    llvm_unreachable("Target has no sub-registers");
  }

  /// Overridden by TableGen in targets that have sub-registers.
  virtual LaneBitmask
  composeSubRegIndexLaneMaskImpl(unsigned, LaneBitmask) const {
    llvm_unreachable("Target has no sub-registers");
  }

  virtual LaneBitmask reverseComposeSubRegIndexLaneMaskImpl(unsigned,
                                                            LaneBitmask) const {
    llvm_unreachable("Target has no sub-registers");
  }

  /// Return the register cost table index. This implementation is sufficient
  /// for most architectures and can be overridden by targets in case there are
  /// multiple cost values associated with each register.
  virtual unsigned getRegisterCostTableIndex(const MachineFunction &MF) const {
    return 0;
  }

public:
  /// Find a common super-register class if it exists.
  ///
  /// Find a register class, SuperRC and two sub-register indices, PreA and
  /// PreB, such that:
  ///
  ///   1. PreA + SubA == PreB + SubB  (using composeSubRegIndices()), and
  ///
  ///   2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
  ///
  ///   3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
  ///
  /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
  /// requirements, and there is no register class with a smaller spill size
  /// that satisfies the requirements.
  ///
  /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
  ///
  /// Either of the PreA and PreB sub-register indices may be returned as 0. In
  /// that case, the returned register class will be a sub-class of the
  /// corresponding argument register class.
  ///
  /// The function returns NULL if no register class can be found.
  const TargetRegisterClass*
  getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                         const TargetRegisterClass *RCB, unsigned SubB,
                         unsigned &PreA, unsigned &PreB) const;

  //===--------------------------------------------------------------------===//
  // Register Class Information
  //
protected:
  const RegClassInfo &getRegClassInfo(const TargetRegisterClass &RC) const {
    return RCInfos[getNumRegClasses() * HwMode + RC.getID()];
  }

public:
  /// Register class iterators
  regclass_iterator regclass_begin() const { return RegClassBegin; }
  regclass_iterator regclass_end() const { return RegClassEnd; }
  iterator_range<regclass_iterator> regclasses() const {
    return make_range(regclass_begin(), regclass_end());
  }

  unsigned getNumRegClasses() const {
    return (unsigned)(regclass_end()-regclass_begin());
  }

  /// Returns the register class associated with the enumeration value.
  /// See class MCOperandInfo.
  const TargetRegisterClass *getRegClass(unsigned i) const {
    assert(i < getNumRegClasses() && "Register Class ID out of range");
    return RegClassBegin[i];
  }

  /// Returns the name of the register class.
  const char *getRegClassName(const TargetRegisterClass *Class) const {
    return MCRegisterInfo::getRegClassName(Class->MC);
  }

  /// Find the largest common subclass of A and B.
  /// Return NULL if there is no common subclass.
  const TargetRegisterClass *
  getCommonSubClass(const TargetRegisterClass *A,
                    const TargetRegisterClass *B) const;

  /// Returns a TargetRegisterClass used for pointer values.
  /// If a target supports multiple different pointer register classes,
  /// kind specifies which one is indicated.
  virtual const TargetRegisterClass *
  getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
    llvm_unreachable("Target didn't implement getPointerRegClass!");
  }
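
  // Illustrative sketch: enumerating every register class known to the target
  // and printing its name, using the iterators declared above.
  //
  //   for (const TargetRegisterClass *RC : TRI.regclasses())
  //     dbgs() << TRI.getRegClassName(RC) << " has " << RC->getNumRegs()
  //            << " registers\n";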
  /// Returns a legal register class to copy a register in the specified class
  /// to or from. If it is possible to copy the register directly without using
  /// a cross register class copy, return the specified RC. Returns NULL if it
  /// is not possible to copy between two registers of the specified class.
  virtual const TargetRegisterClass *
  getCrossCopyRegClass(const TargetRegisterClass *RC) const {
    return RC;
  }

  /// Returns the largest super class of RC that is legal to use in the current
  /// sub-target and has the same spill size.
  /// The returned register class can be used to create virtual registers which
  /// means that all its registers can be copied and spilled.
  virtual const TargetRegisterClass *
  getLargestLegalSuperClass(const TargetRegisterClass *RC,
                            const MachineFunction &) const {
    /// The default implementation is very conservative and doesn't allow the
    /// register allocator to inflate register classes.
    return RC;
  }

  /// Return the register pressure "high water mark" for the specific register
  /// class. The scheduler is in high register pressure mode (for the specific
  /// register class) if it goes over the limit.
  ///
  /// Note: this is the old register pressure model that relies on a manually
  /// specified representative register class per value type.
  virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
    return 0;
  }

  /// Return a heuristic for the machine scheduler to compare the profitability
  /// of increasing one register pressure set versus another. The scheduler
  /// will prefer increasing the register pressure of the set which returns
  /// the largest value for this function.
  virtual unsigned getRegPressureSetScore(const MachineFunction &MF,
                                          unsigned PSetID) const {
    return PSetID;
  }

  /// Get the weight in units of pressure for this register class.
  virtual const RegClassWeight &getRegClassWeight(
    const TargetRegisterClass *RC) const = 0;

  /// Returns size in bits of a phys/virtual/generic register.
  unsigned getRegSizeInBits(Register Reg, const MachineRegisterInfo &MRI) const;

  /// Get the weight in units of pressure for this register unit.
  virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;

  /// Get the number of dimensions of register pressure.
  virtual unsigned getNumRegPressureSets() const = 0;

  /// Get the name of this register unit pressure set.
  virtual const char *getRegPressureSetName(unsigned Idx) const = 0;

  /// Get the register unit pressure limit for this dimension.
  /// This limit must be adjusted dynamically for reserved registers.
  virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
                                          unsigned Idx) const = 0;

  /// Get the dimensions of register pressure impacted by this register class.
  /// Returns a -1 terminated array of pressure set IDs.
  virtual const int *getRegClassPressureSets(
    const TargetRegisterClass *RC) const = 0;
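
  // Illustrative sketch (assumes `RC` is a register class of this target and
  // `MF` the current MachineFunction): the pressure-set arrays are -1
  // terminated, so they are walked like this.
  //
  //   for (const int *PSet = TRI.getRegClassPressureSets(RC); *PSet != -1;
  //        ++PSet)
  //     dbgs() << TRI.getRegPressureSetName(*PSet) << " limit "
  //            << TRI.getRegPressureSetLimit(MF, *PSet) << '\n';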
  /// Get the dimensions of register pressure impacted by this register unit.
  /// Returns a -1 terminated array of pressure set IDs.
  virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;

  /// Get a list of 'hint' registers that the register allocator should try
  /// first when allocating a physical register for the virtual register
  /// VirtReg. These registers are effectively moved to the front of the
  /// allocation order. If true is returned, regalloc will try to only use
  /// hints to the greatest extent possible even if it means spilling.
  ///
  /// The Order argument is the allocation order for VirtReg's register class
  /// as returned from RegisterClassInfo::getOrder(). The hint registers must
  /// come from Order, and they must not be reserved.
  ///
  /// The default implementation of this function will only add target
  /// independent register allocation hints. Targets that override this
  /// function should typically call this default implementation as well and
  /// expect to see generic copy hints added.
  virtual bool
  getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
                        SmallVectorImpl<MCPhysReg> &Hints,
                        const MachineFunction &MF,
                        const VirtRegMap *VRM = nullptr,
                        const LiveRegMatrix *Matrix = nullptr) const;

  /// A callback to allow targets a chance to update register allocation hints
  /// when a register is "changed" (e.g. coalesced) to another register.
  /// E.g. on ARM, some virtual registers should target register pairs;
  /// if one register of the pair is coalesced to another register, the
  /// allocation hint of the other half of the pair should be changed to point
  /// to the new register.
  virtual void updateRegAllocHint(Register Reg, Register NewReg,
                                  MachineFunction &MF) const {
    // Do nothing.
  }

  /// Allow the target to reverse allocation order of local live ranges. This
  /// will generally allocate shorter local live ranges first. For targets with
  /// many registers, this could reduce regalloc compile time by a large
  /// factor. It is disabled by default for three reasons:
  /// (1) Top-down allocation is simpler and easier to debug for targets that
  ///     don't benefit from reversing the order.
  /// (2) Bottom-up allocation could result in poor eviction decisions on some
  ///     targets affecting the performance of compiled code.
  /// (3) Bottom-up allocation is no longer guaranteed to optimally color.
  virtual bool reverseLocalAssignment() const { return false; }

  /// Add the allocation priority to global and split ranges as well as the
  /// local ranges when registers are added to the queue.
  virtual bool addAllocPriorityToGlobalRanges() const { return false; }

  /// Allow the target to override the cost of using a callee-saved register
  /// for the first time. Default value of 0 means we will use a callee-saved
  /// register if it is available.
  virtual unsigned getCSRFirstUseCost() const { return 0; }

  /// Returns true if the target requires (and can make use of) the register
  /// scavenger.
  virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target wants to use frame pointer based accesses to
  /// spill to the scavenger emergency spill slot.
  virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
    return true;
  }

  /// Returns true if the target requires post PEI scavenging of registers for
  /// materializing frame index constants.
  virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target requires using the RegScavenger directly for
  /// frame elimination despite using requiresFrameIndexScavenging.
  virtual bool requiresFrameIndexReplacementScavenging(
      const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target wants the LocalStackAllocation pass to be run
  /// and virtual base registers used for more efficient stack access.
  virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
    return false;
  }

  /// Return true if target has reserved a spill slot in the stack frame of
  /// the given function for the specified register. E.g., on x86, if the frame
  /// register is required, the first fixed stack object is reserved as its
  /// spill slot. This tells PEI not to create a new stack frame
  /// object for the given register. It should be called only after
  /// determineCalleeSaves().
  virtual bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg,
                                    int &FrameIdx) const {
    return false;
  }

  /// Returns true if the live-ins should be tracked after register allocation.
  virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
    return true;
  }

  /// True if the stack can be realigned for the target.
  virtual bool canRealignStack(const MachineFunction &MF) const;

  /// True if storage within the function requires the stack pointer to be
  /// aligned more than the normal calling convention calls for.
  virtual bool shouldRealignStack(const MachineFunction &MF) const;

  /// True if stack realignment is required and still possible.
  bool hasStackRealignment(const MachineFunction &MF) const {
    return shouldRealignStack(MF) && canRealignStack(MF);
  }

  /// Get the offset from the referenced frame index in the instruction,
  /// if there is one.
  virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
                                           int Idx) const {
    return 0;
  }

  /// Returns true if the instruction's frame index reference would be better
  /// served by a base register other than FP or SP.
  /// Used by LocalStackFrameAllocation to determine which frame index
  /// references it should create new base registers for.
  virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
    return false;
  }

  /// Insert defining instruction(s) for a pointer to FrameIdx before
  /// insertion point I. Return materialized frame pointer.
  virtual Register materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                int FrameIdx,
                                                int64_t Offset) const {
    llvm_unreachable("materializeFrameBaseRegister does not exist on this "
                     "target");
  }

  /// Resolve a frame index operand of an instruction
  /// to reference the indicated base register plus offset instead.
  virtual void resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                 int64_t Offset) const {
    llvm_unreachable("resolveFrameIndex does not exist on this target");
  }

  /// Determine whether a given base register plus offset immediate is
  /// encodable to resolve a frame index.
  virtual bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg,
                                  int64_t Offset) const {
    llvm_unreachable("isFrameOffsetLegal does not exist on this target");
  }

  /// Gets the DWARF expression opcodes for \p Offset.
  virtual void getOffsetOpcodes(const StackOffset &Offset,
                                SmallVectorImpl<uint64_t> &Ops) const;

  /// Prepends a DWARF expression for \p Offset to DIExpression \p Expr.
  DIExpression *
  prependOffsetExpression(const DIExpression *Expr, unsigned PrependFlags,
                          const StackOffset &Offset) const;

  /// Spill the register so it can be used by the register scavenger.
  /// Return true if the register was spilled, false otherwise.
  /// If this function does not spill the register, the scavenger
  /// will instead spill it to the emergency spill slot.
  virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator &UseMI,
                                     const TargetRegisterClass *RC,
                                     Register Reg) const {
    return false;
  }

  /// This method must be overridden to eliminate abstract frame indices from
  /// instructions which may use them. The instruction referenced by the
  /// iterator contains an MO_FrameIndex operand which must be eliminated by
  /// this method. This method may modify or replace the specified instruction,
  /// as long as it keeps the iterator pointing at the finished product.
  /// SPAdj is the SP adjustment due to call frame setup instruction.
  /// FIOperandNum is the FI operand number.
  virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                   int SPAdj, unsigned FIOperandNum,
                                   RegScavenger *RS = nullptr) const = 0;

  /// Return the assembly name for \p Reg.
  virtual StringRef getRegAsmName(MCRegister Reg) const {
    // FIXME: We are assuming that the assembly name is equal to the TableGen
    // name converted to lower case
    //
    // The TableGen name is the name of the definition for this register in the
    // target's tablegen files. For example, the TableGen name of
    // def EAX : Register <...>; is "EAX"
    return StringRef(getName(Reg));
  }

  //===--------------------------------------------------------------------===//
  /// Subtarget Hooks

  /// SrcRC and DstRC will be morphed into NewRC if this returns true.
  virtual bool shouldCoalesce(MachineInstr *MI,
                              const TargetRegisterClass *SrcRC,
                              unsigned SubReg,
                              const TargetRegisterClass *DstRC,
                              unsigned DstSubReg,
                              const TargetRegisterClass *NewRC,
                              LiveIntervals &LIS) const
  { return true; }

  /// Region split has a high compile time cost especially for large live
  /// ranges. This method is used to decide whether or not \p VirtReg should
  /// go through this expensive splitting heuristic.
  virtual bool shouldRegionSplitForVirtReg(const MachineFunction &MF,
                                           const LiveInterval &VirtReg) const;

  /// Last chance recoloring has a high compile time cost especially for
  /// targets with a lot of registers.
  /// This method is used to decide whether or not \p VirtReg should
  /// go through this expensive heuristic.
  /// When this target hook is hit, by returning false, there is a high
  /// chance that the register allocation will fail altogether (usually with
  /// "ran out of registers").
  /// That said, this error usually points to another problem in the
  /// optimization pipeline.
  virtual bool
  shouldUseLastChanceRecoloringForVirtReg(const MachineFunction &MF,
                                          const LiveInterval &VirtReg) const {
    return true;
  }

  /// Deferred spilling delays the spill insertion of a virtual register
  /// after every other allocation. By deferring the spilling, it is
  /// sometimes possible to eliminate that spilling altogether because
  /// something else could have been eliminated, thus leaving some space
  /// for the virtual register.
  /// However, this comes with a compile time impact because it adds one
  /// more stage to the greedy register allocator.
  /// This method is used to decide whether \p VirtReg should use the deferred
  /// spilling stage instead of being spilled right away.
  virtual bool
  shouldUseDeferredSpillingForVirtReg(const MachineFunction &MF,
                                      const LiveInterval &VirtReg) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// Debug information queries.

  /// getFrameRegister - This method should return the register used as a base
  /// for values allocated in the current stack frame.
  virtual Register getFrameRegister(const MachineFunction &MF) const = 0;

  /// Mark a register and all its aliases as reserved in the given set.
  void markSuperRegs(BitVector &RegisterSet, MCRegister Reg) const;
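
  // Illustrative sketch of how a target's getReservedRegs() implementation
  // typically uses the two helpers above (`SP` and `FP` are placeholders for
  // whatever registers the target actually reserves).
  //
  //   BitVector Reserved(getNumRegs());
  //   markSuperRegs(Reserved, SP);
  //   markSuperRegs(Reserved, FP);
  //   assert(checkAllSuperRegsMarked(Reserved));
  //   return Reserved;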
  /// Returns true if for every register in the set all super registers are
  /// part of the set as well.
  bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
      ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;

  virtual const TargetRegisterClass *
  getConstrainedRegClassForOperand(const MachineOperand &MO,
                                   const MachineRegisterInfo &MRI) const {
    return nullptr;
  }

  /// Returns the physical register number of sub-register "Index"
  /// for physical register RegNo. Return zero if the sub-register does not
  /// exist.
  inline MCRegister getSubReg(MCRegister Reg, unsigned Idx) const {
    return static_cast<const MCRegisterInfo *>(this)->getSubReg(Reg, Idx);
  }
};

//===----------------------------------------------------------------------===//
//                           SuperRegClassIterator
//===----------------------------------------------------------------------===//
//
// Iterate over the possible super-registers for a given register class. The
// iterator will visit a list of pairs (Idx, Mask) corresponding to the
// possible classes of super-registers.
//
// Each bit mask will have at least one set bit, and each set bit in Mask
// corresponds to a SuperRC such that:
//
//   For all Reg in SuperRC: Reg:Idx is in RC.
//
// The iterator can include (0, RC->getSubClassMask()) as the first entry which
// also satisfies the above requirement, assuming Reg:0 == Reg.
//
class SuperRegClassIterator {
  const unsigned RCMaskWords;
  unsigned SubReg = 0;
  const uint16_t *Idx;
  const uint32_t *Mask;

public:
  /// Create a SuperRegClassIterator that visits all the super-register classes
  /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
  SuperRegClassIterator(const TargetRegisterClass *RC,
                        const TargetRegisterInfo *TRI,
                        bool IncludeSelf = false)
    : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
      Idx(RC->getSuperRegIndices()), Mask(RC->getSubClassMask()) {
    if (!IncludeSelf)
      ++*this;
  }

  /// Returns true if this iterator is still pointing at a valid entry.
  bool isValid() const { return Idx; }

  /// Returns the current sub-register index.
  unsigned getSubReg() const { return SubReg; }

  /// Returns the bit mask of register classes that getSubReg() projects into
  /// RC.
  /// See TargetRegisterClass::getSubClassMask() for how to use it.
  const uint32_t *getMask() const { return Mask; }

  /// Advance iterator to the next entry.
  void operator++() {
    assert(isValid() && "Cannot move iterator past end.");
    Mask += RCMaskWords;
    SubReg = *Idx++;
    if (!SubReg)
      Idx = nullptr;
  }
};
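
// Illustrative sketch: visiting every (sub-register index, class mask) pair
// for a register class `RC`. Each mask can then be decoded with the
// BitMaskClassIterator declared below.
//
//   for (SuperRegClassIterator It(RC, &TRI); It.isValid(); ++It)
//     dbgs() << "subreg index " << It.getSubReg() << '\n';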
//===----------------------------------------------------------------------===//
//                           BitMaskClassIterator
//===----------------------------------------------------------------------===//
/// This class encapsulates the logic to iterate over a bitmask returned by
/// the various RegClass related APIs.
/// E.g., this class can be used to iterate over the subclasses provided by
/// TargetRegisterClass::getSubClassMask or SuperRegClassIterator::getMask.
class BitMaskClassIterator {
  /// Total number of register classes.
  const unsigned NumRegClasses;
  /// Base index of CurrentChunk.
  /// In other words, the number of bits we read to get at the
  /// beginning of that chunk.
  unsigned Base = 0;
  /// Adjusted base index of CurrentChunk.
  /// Base index + how many bits we read within CurrentChunk.
  unsigned Idx = 0;
  /// Current register class ID.
  unsigned ID = 0;
  /// Mask we are iterating over.
  const uint32_t *Mask;
  /// Current chunk of the Mask we are traversing.
  uint32_t CurrentChunk;

  /// Move ID to the next set bit.
  void moveToNextID() {
    // If the current chunk of memory is empty, move to the next one,
    // while making sure we do not go past the number of register
    // classes.
    while (!CurrentChunk) {
      // Move to the next chunk.
      Base += 32;
      if (Base >= NumRegClasses) {
        ID = NumRegClasses;
        return;
      }
      CurrentChunk = *++Mask;
      Idx = Base;
    }
    // Otherwise look for the first bit set from the right
    // (representation of the class ID is big endian).
    // See getSubClassMask for more details on the representation.
    unsigned Offset = countTrailingZeros(CurrentChunk);
    // Add the Offset to the adjusted base number of this chunk: Idx.
    // This is the ID of the register class.
    ID = Idx + Offset;

    // Consume the zeros, if any, and the bit we just read
    // so that we are at the right spot for the next call.
    // Do not do Offset + 1 because Offset may be 31 and a shift by 32
    // would be UB; in that case we could have made the chunk equal to 0,
    // but that would have required an extra if statement.
    moveNBits(Offset);
    moveNBits(1);
  }

  /// Move \p NumBits bits forward in CurrentChunk.
  void moveNBits(unsigned NumBits) {
    assert(NumBits < 32 && "Undefined behavior spotted!");
    // Consume the bits we read for the next call.
    CurrentChunk >>= NumBits;
    // Adjust the base for the chunk.
    Idx += NumBits;
  }

public:
  /// Create a BitMaskClassIterator that visits all the register classes
  /// represented by \p Mask.
  ///
  /// \pre \p Mask != nullptr
  BitMaskClassIterator(const uint32_t *Mask, const TargetRegisterInfo &TRI)
      : NumRegClasses(TRI.getNumRegClasses()), Mask(Mask), CurrentChunk(*Mask) {
    // Move to the first ID.
    moveToNextID();
  }

  /// Returns true if this iterator is still pointing at a valid entry.
  bool isValid() const { return getID() != NumRegClasses; }

  /// Returns the current register class ID.
  unsigned getID() const { return ID; }

  /// Advance iterator to the next entry.
  void operator++() {
    assert(isValid() && "Cannot move iterator past end.");
    moveToNextID();
  }
};

// This is useful when building IndexedMaps keyed on virtual registers.
struct VirtReg2IndexFunctor {
  using argument_type = Register;
  unsigned operator()(Register Reg) const {
    return Register::virtReg2Index(Reg);
  }
};
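
// Illustrative sketch: decoding the register class IDs encoded in a sub-class
// mask (for example the one returned by TargetRegisterClass::getSubClassMask
// or SuperRegClassIterator::getMask).
//
//   for (BitMaskClassIterator It(RC->getSubClassMask(), TRI); It.isValid();
//        ++It)
//     dbgs() << TRI.getRegClassName(TRI.getRegClass(It.getID())) << '\n';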
/// Prints virtual and physical registers with or without a TRI instance.
///
/// The format is:
///   %noreg          - NoRegister
///   %5              - a virtual register.
///   %5:sub_8bit     - a virtual register with sub-register index (with TRI).
///   %eax            - a physical register
///   %physreg17      - a physical register when no TRI instance given.
///
/// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
Printable printReg(Register Reg, const TargetRegisterInfo *TRI = nullptr,
                   unsigned SubIdx = 0,
                   const MachineRegisterInfo *MRI = nullptr);

/// Create Printable object to print register units on a \ref raw_ostream.
///
/// Register units are named after their root registers:
///
///   al      - Single root.
///   fp0~st7 - Dual roots.
///
/// Usage: OS << printRegUnit(Unit, TRI) << '\n';
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);

/// Create Printable object to print virtual registers and physical
/// registers on a \ref raw_ostream.
Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);

/// Create Printable object to print register classes or register banks
/// on a \ref raw_ostream.
Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
                              const TargetRegisterInfo *TRI);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETREGISTERINFO_H