//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC), BasePtr(ARM::R6) {}

static unsigned getFramePointerReg(const ARMSubtarget &STI) {
  return STI.useR7AsFramePointer() ? ARM::R7 : ARM::R11;
}

const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  bool UseSplitPush = STI.splitFramePushPop(*MF);
  const MCPhysReg *RegList =
      STI.isTargetDarwin()
          ? CSR_iOS_SaveList
          : (UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList);

  const Function *F = MF->getFunction();
  if (F->getCallingConv() == CallingConv::GHC) {
    // The GHC set of callee-saved regs is empty, as all those regs are
    // used for passing STG regs around.
    return CSR_NoRegs_SaveList;
  } else if (F->hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow a
      // function conforming to the AAPCS to function as a handler.
      return UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList;
    } else if (F->getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.isTargetDarwin() && STI.getTargetLowering()->supportSwiftError() &&
      F->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_iOS_SwiftError_SaveList;

  if (STI.isTargetDarwin() && F->getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;
  return RegList;
}
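
// Explanatory note: with "split CSR" (used for CXX_FAST_TLS on Darwin), part
// of the callee-saved set is saved and restored via register copies inserted
// in the entry/exit blocks rather than via the usual push/pop; those
// registers are reported separately by getCalleeSavedRegsViaCopy() below.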
const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return CSR_NoRegs_RegMask;

  if (STI.isTargetDarwin() && STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_iOS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t*
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.useSoftFloat() && STI.hasVFP2() && !STI.isThumb1Only())
    return CSR_NoRegs_RegMask;
  else
    return CSR_FPRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i32 argument (which must also be the register used to return a
  // single i32 return value).
  //
  // If the calling convention does not use the same register for both, or
  // otherwise does not want to enable this optimization, the function should
  // return NULL.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  Reserved.set(ARM::SP);
  Reserved.set(ARM::PC);
  Reserved.set(ARM::FPSCR);
  Reserved.set(ARM::APSR_NZCV);
  if (TFI->hasFP(MF))
    Reserved.set(getFramePointerReg(STI));
  if (hasBasePointer(MF))
    Reserved.set(BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    Reserved.set(ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
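  // D16-D31 only exist on VFPv3 and later units with the full 32
  // double-precision registers; VFPv2 and the D16 variants of VFPv3/VFPv4
  // provide just D0-D15.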
  if (!STI.hasVFP3() || STI.hasD16()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    Reserved.set(ARM::D16, ARM::D31 + 1);
  }
  const TargetRegisterClass *RC = &ARM::GPRPairRegClass;
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); I != E; ++I)
    for (MCSubRegIterator SI(*I, this); SI.isValid(); ++SI)
      if (Reserved.test(*SI))
        Reserved.set(*I);

  return Reserved;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
    case ARM::GPRPairRegClassID:
      return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass;  // Can't copy CCR registers.
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
static unsigned getPairedGPR(unsigned Reg, bool Odd, const MCRegisterInfo *RI) {
  for (MCSuperRegIterator Supers(Reg, RI); Supers.isValid(); ++Supers)
    if (ARM::GPRPairRegClass.contains(*Supers))
      return RI->getSubReg(*Supers, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
void
ARMBaseRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM,
                                           const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  default:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    return;
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and provide
  // the paired register as the first hint.
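  // For example, if this virtual register wants an odd register and its
  // partner has already been assigned R2, getPairedGPR returns R3 (the gsub_1
  // half of the R2_R3 pair), which then becomes the first hint.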
  unsigned Paired = Hint.second;
  if (Paired == 0)
    return;

  unsigned PairedPhys = 0;
  if (TargetRegisterInfo::isPhysicalRegister(Paired)) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys && is_contained(Order, PairedPhys))
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (unsigned I = 0, E = Order.size(); I != E; ++I) {
    unsigned Reg = Order[I];
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    unsigned Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
}

void
ARMBaseRegisterInfo::updateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one of the even / odd register pair and it's now changed
    // (e.g. coalesced) into a different register, the other register of the
    // pair allocation hint must be updated to reflect the relationship
    // change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (TargetRegisterInfo::isVirtualRegister(NewReg))
        MRI->setRegAllocationHint(NewReg,
            Hint.first == (unsigned)ARMRI::RegPairOdd ? ARMRI::RegPairEven
                                                      : ARMRI::RegPairOdd,
            OtherReg);
    }
  }
}

bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // When outgoing call frames are so large that we adjust the stack pointer
  // around the call, we can no longer use the stack pointer to reach the
  // emergency spill slot.
  if (needsStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 allows positive offsets only.
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  if (AFI->isThumbFunction() && MFI.hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee-saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If the estimate is wrong, the scavenger will still make the access
    // work; it just won't be optimal.
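    // The 128-byte threshold used here stays well inside Thumb2's 255-byte
    // negative ldr/str range noted above, leaving headroom for callee-saved
    // registers and spill slots on top of the local frame.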
    if (AFI->isThumb2Function() && MFI.getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
  // 3. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  if (AFI->isThumb1OnlyFunction())
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(getFramePointerReg(MF.getSubtarget<ARMSubtarget>())))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
         needsStackRealignment(MF);
}

unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return getFramePointerReg(STI);
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
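/// The constant is placed in the function's constant pool and loaded via an
/// LDRcp pseudo-instruction, which is ultimately emitted as a PC-relative
/// load once the constant islands pass lays out the pool entry.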
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0).addImm(Pred).addReg(PredReg)
      .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrMode3: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrModeT1_s: {
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  }
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate, based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note: this is pre-regalloc,
  // so we don't know everything for certain yet), whether this offset is
  // likely to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using
  // it. The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI.getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI.hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg
/// to be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;  // Defaults to "unknown".
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
                                .addFrameIndex(FrameIdx)
                                .addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    AddDefaultCC(AddDefaultPred(MIB));
}

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset;  // ARM doesn't need the general 64-bit offsets.
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             unsigned BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative offsets and i12 supports only positive ones,
    // so pick the appropriate encoding based on the sign of Offset.
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale - 1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above; handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
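    // Those addressing modes take no immediate offset at all (see
    // isFrameOffsetLegal above), so rewriting the frame index operand to the
    // frame register alone is sufficient.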
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(&ARM::GPRRegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
  }
}

bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If we are not copying into a sub-register, this should be OK because we
  // shouldn't need to split the register.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (NewRC->getSize() < 32 && DstRC->getSize() < 32 && SrcRC->getSize() < 32)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately, we don't know yet whether we will be
  // constrained. The goal of this heuristic is to restrict how many expensive
  // registers we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
               << It->second << "\n");
  DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
               << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //   (1) addresses PR18825
  //   (2) generates better code in some test cases (like vldm-sched-a9.ll)
  //   (3) doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight-line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size() / 100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}