//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC) {}

static unsigned getFramePointerReg(const ARMSubtarget &STI) {
  return STI.useR7AsFramePointer() ? ARM::R7 : ARM::R11;
}

const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  bool UseSplitPush = STI.splitFramePushPop(*MF);
  const MCPhysReg *RegList =
      STI.isTargetDarwin()
          ? CSR_iOS_SaveList
          : (UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList);

  const Function &F = MF->getFunction();
  if (F.getCallingConv() == CallingConv::GHC) {
    // The GHC set of callee-saved regs is empty, as all those regs are
    // used for passing STG regs around.
    return CSR_NoRegs_SaveList;
  } else if (F.hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow a
      // function conforming to the AAPCS to function as a handler.
      return UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList;
    } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
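      // The GenericInt list therefore additionally saves all of R0-R12 and
      // LR.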
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.getTargetLowering()->supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    if (STI.isTargetDarwin())
      return CSR_iOS_SwiftError_SaveList;

    return UseSplitPush ? CSR_AAPCS_SplitPush_SwiftError_SaveList :
           CSR_AAPCS_SwiftError_SaveList;
  }

  if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;
  return RegList;
}

const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return CSR_NoRegs_RegMask;

  if (STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return STI.isTargetDarwin() ? CSR_iOS_SwiftError_RegMask
                                : CSR_AAPCS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t*
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.useSoftFloat() && STI.hasVFP2Base() && !STI.isThumb1Only())
    return CSR_NoRegs_RegMask;
  else
    return CSR_FPRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i32 argument (which must also be the register used to return a
  // single i32 return value).
  //
  // In case the calling convention does not use the same register for both, or
  // otherwise does not want to enable this optimization, the function should
  // return nullptr.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return nullptr;
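  // On ARM, R0 carries both the first i32 argument and an i32 return value,
  // so the ThisReturn masks are the usual call-preserved masks plus R0.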
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::SP);
  markSuperRegs(Reserved, ARM::PC);
  markSuperRegs(Reserved, ARM::FPSCR);
  markSuperRegs(Reserved, ARM::APSR_NZCV);
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, getFramePointerReg(STI));
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    markSuperRegs(Reserved, ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasD32()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    for (unsigned R = 0; R < 16; ++R)
      markSuperRegs(Reserved, ARM::D16 + R);
  }
  const TargetRegisterClass &RC = ARM::GPRPairRegClass;
  for (unsigned Reg : RC)
    for (MCSubRegIterator SI(Reg, this); SI.isValid(); ++SI)
      if (Reserved.test(*SI))
        markSuperRegs(Reserved, Reg);
  // For the v8.1-M architecture.
  markSuperRegs(Reserved, ARM::ZR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool ARMBaseRegisterInfo::
isAsmClobberable(const MachineFunction &MF, unsigned PhysReg) const {
  return !getReservedRegs(MF).test(PhysReg);
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
    case ARM::GPRPairRegClassID:
      return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass;  // Can't copy CCR registers.
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID: {
    // hasFP ends up querying the max call frame size, which may not be
    // computed yet when getRegPressureLimit() is called as part of
    // ScheduleDAGRRList.
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 5 - HasFP;
  }
  case ARM::GPRRegClassID: {
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
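// Each GPRPair super-register (R0_R1, R2_R3, ...) covers one even/odd pair of
// GPRs; gsub_0 names the even half and gsub_1 the odd half, so, for example,
// getPairedGPR(ARM::R2, /*Odd=*/true, RI) returns ARM::R3.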
static unsigned getPairedGPR(unsigned Reg, bool Odd, const MCRegisterInfo *RI) {
  for (MCSuperRegIterator Supers(Reg, RI); Supers.isValid(); ++Supers)
    if (ARM::GPRPairRegClass.contains(*Supers))
      return RI->getSubReg(*Supers, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
bool
ARMBaseRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM,
                                           const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  default:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    return false;
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and provide
  // the paired register as the first hint.
  unsigned Paired = Hint.second;
  if (Paired == 0)
    return false;

  unsigned PairedPhys = 0;
  if (TargetRegisterInfo::isPhysicalRegister(Paired)) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys && is_contained(Order, PairedPhys))
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (unsigned Reg : Order) {
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    unsigned Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
  return false;
}
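/// updateRegAllocHint - When one half of a hinted even/odd pair is replaced
/// (e.g. by coalescing), keep the two halves' hints pointing at each other.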
void
ARMBaseRegisterInfo::updateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one half of an even / odd register pair and it has now been
    // changed (e.g. coalesced) into a different register, the other register
    // of the pair's allocation hint must be updated to reflect the
    // relationship change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (TargetRegisterInfo::isVirtualRegister(NewReg))
        MRI->setRegAllocationHint(NewReg,
            Hint.first == (unsigned)ARMRI::RegPairOdd ? ARMRI::RegPairEven
                                                      : ARMRI::RegPairOdd,
            OtherReg);
    }
  }
}

bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // When outgoing call frames are so large that we adjust the stack pointer
  // around the call, we can no longer use the stack pointer to reach the
  // emergency spill slot.
  if (needsStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 allows positive offsets only.
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  if (AFI->isThumbFunction() && MFI.hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If the estimate is wrong, the register scavenger will still make the
    // access work; it just won't be optimal.
    if (AFI->isThumb2Function() && MFI.getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(getFramePointerReg(MF.getSubtarget<ARMSubtarget>())))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken()
      || needsStackRealignment(MF);
}

unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return getFramePointerReg(STI);
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
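/// For example, emitLoadConstPool(MBB, MBBI, DL, ARM::R4, 0, 0x11223344,
/// ARMCC::AL, 0) emits an LDRcp that loads R4 from a new 4-byte constant-pool
/// entry holding 0x11223344.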
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0)
      .add(predOps(Pred, PredReg))
      .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrMode3:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrModeT1_s:
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}
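// For example, for a VFP load "vldr d8, [r7, #-16]" (AddrMode5), the encoded
// immediate is 4 with a subtract bit and a scale of 4, so
// getFrameIndexInstrOffset returns -16.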
/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate, based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note: this is pre-regalloc,
  // so we don't know everything for certain yet), whether this offset is
  // likely to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using
  // it. The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI.getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI.hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
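  // For example, an estimated FP offset of -300 is not encodable by Thumb2
  // loads/stores (t2LDRi8/t2STRi8 reach only -255), so a base register helps.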
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
}

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}
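/// isFrameOffsetLegal - Return true if Offset can be encoded directly in the
/// addressing mode MI uses with BaseReg. For example, t2LDRi12 accepts byte
/// offsets 0..4095, the t2LDRi8 form reaches down to -255, and SP-relative
/// tLDRspi accepts 0..1020 in multiples of 4.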
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             unsigned BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;
  for (; !MI->getOperand(i).isFI(); ++i)
    assert(i+1 < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative offsets and i12 only positive ones, so
    // consider the appropriate instruction based on the sign of Offset.
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above; handle the rest by providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(&ARM::GPRRegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false,
                                                 true);
  }
}
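/// shouldCoalesce - Heuristic limiting coalescing into large (>= 256-bit)
/// register classes such as QQPR and QQQQPR within a basic block, since
/// over-coalescing them can over-constrain the register allocator
/// (see PR18825).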
bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register, this should be OK because we shouldn't
  // need to split the reg.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (getRegSizeInBits(*NewRC) < 256 && getRegSizeInBits(*DstRC) < 256 &&
      getRegSizeInBits(*SrcRC) < 256)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately, we don't know yet whether we will be
  // constrained. The goal of this heuristic is to restrict how many expensive
  // registers we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
                    << It->second << "\n");
  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
                    << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //  (1) addresses PR18825
  //  (2) generates better code in some test cases (like vldm-shed-a9.ll)
  //  (3) doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size()/100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}