1 //===-- ThumbRegisterInfo.cpp - Thumb-1 Register Information -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the Thumb-1 implementation of the TargetRegisterInfo
11 // class.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "ThumbRegisterInfo.h"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMMachineFunctionInfo.h"
18 #include "ARMSubtarget.h"
19 #include "MCTargetDesc/ARMAddressingModes.h"
20 #include "llvm/CodeGen/MachineConstantPool.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/RegisterScavenging.h"
26 #include "llvm/IR/Constants.h"
27 #include "llvm/IR/DerivedTypes.h"
28 #include "llvm/IR/Function.h"
29 #include "llvm/IR/LLVMContext.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Target/TargetFrameLowering.h"
33 #include "llvm/Target/TargetMachine.h"
34 
namespace llvm {
// Command-line flag defined elsewhere in the ARM backend (extern here so this
// translation unit can reference it).
extern cl::opt<bool> ReuseFrameIndexVals;
}
38 
39 using namespace llvm;
40 
41 ThumbRegisterInfo::ThumbRegisterInfo() : ARMBaseRegisterInfo() {}
42 
43 const TargetRegisterClass *
44 ThumbRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
45                                               const MachineFunction &MF) const {
46   if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
47     return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
48 
49   if (ARM::tGPRRegClass.hasSubClassEq(RC))
50     return &ARM::tGPRRegClass;
51   return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
52 }
53 
54 const TargetRegisterClass *
55 ThumbRegisterInfo::getPointerRegClass(const MachineFunction &MF,
56                                       unsigned Kind) const {
57   if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
58     return ARMBaseRegisterInfo::getPointerRegClass(MF, Kind);
59   return &ARM::tGPRRegClass;
60 }
61 
62 static void emitThumb1LoadConstPool(MachineBasicBlock &MBB,
63                                     MachineBasicBlock::iterator &MBBI,
64                                     const DebugLoc &dl, unsigned DestReg,
65                                     unsigned SubIdx, int Val,
66                                     ARMCC::CondCodes Pred, unsigned PredReg,
67                                     unsigned MIFlags) {
68   MachineFunction &MF = *MBB.getParent();
69   const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
70   const TargetInstrInfo &TII = *STI.getInstrInfo();
71   MachineConstantPool *ConstantPool = MF.getConstantPool();
72   const Constant *C = ConstantInt::get(
73           Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
74   unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
75 
76   BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
77     .addReg(DestReg, getDefRegState(true), SubIdx)
78     .addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg)
79     .setMIFlags(MIFlags);
80 }
81 
82 static void emitThumb2LoadConstPool(MachineBasicBlock &MBB,
83                                     MachineBasicBlock::iterator &MBBI,
84                                     const DebugLoc &dl, unsigned DestReg,
85                                     unsigned SubIdx, int Val,
86                                     ARMCC::CondCodes Pred, unsigned PredReg,
87                                     unsigned MIFlags) {
88   MachineFunction &MF = *MBB.getParent();
89   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
90   MachineConstantPool *ConstantPool = MF.getConstantPool();
91   const Constant *C = ConstantInt::get(
92            Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
93   unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
94 
95   BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))
96     .addReg(DestReg, getDefRegState(true), SubIdx)
97     .addConstantPoolIndex(Idx).addImm((int64_t)ARMCC::AL).addReg(0)
98     .setMIFlags(MIFlags);
99 }
100 
101 /// emitLoadConstPool - Emits a load from constpool to materialize the
102 /// specified immediate.
103 void ThumbRegisterInfo::emitLoadConstPool(
104     MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
105     const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
106     ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
107   MachineFunction &MF = *MBB.getParent();
108   const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
109   if (STI.isThumb1Only()) {
110     assert((isARMLowRegister(DestReg) || isVirtualRegister(DestReg)) &&
111            "Thumb1 does not have ldr to high register");
112     return emitThumb1LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
113                                    PredReg, MIFlags);
114   }
115   return emitThumb2LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
116                                  PredReg, MIFlags);
117 }
118 
/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
/// in a register using mov / mvn sequences or load the immediate from a
/// constpool entry.
static void emitThumbRegPlusImmInReg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, unsigned DestReg, unsigned BaseReg, int NumBytes,
    bool CanChangeCC, const TargetInstrInfo &TII,
    const ARMBaseRegisterInfo &MRI, unsigned MIFlags = MachineInstr::NoFlags) {
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &ST = MF.getSubtarget<ARMSubtarget>();
  // "High" means the final add must use the tADDhirr form (see below).
  bool isHigh = !isARMLowRegister(DestReg) ||
                (BaseReg != 0 && !isARMLowRegister(BaseReg));
  bool isSub = false;
  // Subtract doesn't have high register version. Load the negative value
  // if either base or dest register is a high register. Also, do not
  // issue sub as part of the sequence if the condition register is to be
  // preserved.
  if (NumBytes < 0 && !isHigh && CanChangeCC) {
    isSub = true;
    NumBytes = -NumBytes;
  }
  // Register that will hold the materialized immediate. Use DestReg itself
  // when it is low or virtual; otherwise create a fresh virtual low register,
  // since the mov/ldr forms below require one.
  unsigned LdReg = DestReg;
  if (DestReg == ARM::SP)
    assert(BaseReg == ARM::SP && "Unexpected!");
  if (!isARMLowRegister(DestReg) && !MRI.isVirtualRegister(DestReg))
    LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);

  if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
    // 0..255: a single flag-setting tMOVi8 suffices.
    AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
        .addImm(NumBytes)
        .setMIFlags(MIFlags);
  } else if (NumBytes < 0 && NumBytes >= -255 && CanChangeCC) {
    // -255..-1: emit the value with tMOVi8, then negate it via tRSB
    // (reverse subtract).
    AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
        .addImm(NumBytes)
        .setMIFlags(MIFlags);
    AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg))
        .addReg(LdReg, RegState::Kill)
        .setMIFlags(MIFlags);
  } else if (ST.genExecuteOnly()) {
    // Execute-only code cannot read from a constant pool; synthesize the
    // immediate with t2MOVi32imm instead.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi32imm), LdReg)
      .addImm(NumBytes).setMIFlags(MIFlags);
  } else
    MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes, ARMCC::AL, 0,
                          MIFlags);

  // Emit add / sub.
  int Opc = (isSub) ? ARM::tSUBrr
                    : ((isHigh || !CanChangeCC) ? ARM::tADDhirr : ARM::tADDrr);
  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
  // tADDhirr does not set flags, so only the low-register forms take the
  // T1 CC-out operand.
  if (Opc != ARM::tADDhirr)
    MIB = AddDefaultT1CC(MIB);
  // Operand ordering: keep BaseReg first when updating SP or subtracting.
  if (DestReg == ARM::SP || isSub)
    MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
  else
    MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
  AddDefaultPred(MIB);
}
177 
/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Tries a series of ADDs or
/// SUBs first, and uses a constant pool value if the instruction sequence would
/// be too long. This is allowed to modify the condition flags.
void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     const DebugLoc &dl, unsigned DestReg,
                                     unsigned BaseReg, int NumBytes,
                                     const TargetInstrInfo &TII,
                                     const ARMBaseRegisterInfo &MRI,
                                     unsigned MIFlags) {
  // Work with the magnitude of the offset; isSub selects SUB-form opcodes.
  bool isSub = NumBytes < 0;
  unsigned Bytes = (unsigned)NumBytes;
  if (isSub) Bytes = -NumBytes;

  // Parameters of the one-off copy instruction (DestReg = BaseReg + imm).
  int CopyOpc = 0;
  unsigned CopyBits = 0;
  unsigned CopyScale = 1;
  bool CopyNeedsCC = false;
  // Parameters of the repeatable in-place instruction (DestReg += imm).
  int ExtraOpc = 0;
  unsigned ExtraBits = 0;
  unsigned ExtraScale = 1;
  bool ExtraNeedsCC = false;

  // Strategy:
  // We need to select two types of instruction, maximizing the available
  // immediate range of each. The instructions we use will depend on whether
  // DestReg and BaseReg are low, high or the stack pointer.
  // * CopyOpc  - DestReg = BaseReg + imm
  //              This will be emitted once if DestReg != BaseReg, and never if
  //              DestReg == BaseReg.
  // * ExtraOpc - DestReg = DestReg + imm
  //              This will be emitted as many times as necessary to add the
  //              full immediate.
  // If the immediate ranges of these instructions are not large enough to cover
  // NumBytes with a reasonable number of instructions, we fall back to using a
  // value loaded from a constant pool.
  if (DestReg == ARM::SP) {
    if (BaseReg == ARM::SP) {
      // sp -> sp
      // Already in right reg, no copy needed
    } else {
      // low -> sp or high -> sp
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    // tADDspi/tSUBspi: 7-bit immediate, scaled by 4.
    ExtraOpc = isSub ? ARM::tSUBspi : ARM::tADDspi;
    ExtraBits = 7;
    ExtraScale = 4;
  } else if (isARMLowRegister(DestReg)) {
    if (BaseReg == ARM::SP) {
      // sp -> low
      assert(!isSub && "Thumb1 does not have tSUBrSPi");
      // tADDrSPi: 8-bit immediate, scaled by 4.
      CopyOpc = ARM::tADDrSPi;
      CopyBits = 8;
      CopyScale = 4;
    } else if (DestReg == BaseReg) {
      // low -> same low
      // Already in right reg, no copy needed
    } else if (isARMLowRegister(BaseReg)) {
      // low -> different low
      // tADDi3/tSUBi3: 3-bit immediate, sets flags.
      CopyOpc = isSub ? ARM::tSUBi3 : ARM::tADDi3;
      CopyBits = 3;
      CopyNeedsCC = true;
    } else {
      // high -> low
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    // tADDi8/tSUBi8: 8-bit immediate, sets flags.
    ExtraOpc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
    ExtraBits = 8;
    ExtraNeedsCC = true;
  } else /* DestReg is high */ {
    if (DestReg == BaseReg) {
      // high -> same high
      // Already in right reg, no copy needed
    } else {
      // {low,high,sp} -> high
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    // No in-place immediate add exists for a high destination.
    ExtraOpc = 0;
  }

  // We could handle an unaligned immediate with an unaligned copy instruction
  // and an aligned extra instruction, but this case is not currently needed.
  assert(((Bytes & 3) == 0 || ExtraScale == 1) &&
         "Unaligned offset, but all instructions require alignment");

  unsigned CopyRange = ((1 << CopyBits) - 1) * CopyScale;
  // If we would emit the copy with an immediate of 0, just use tMOVr.
  if (CopyOpc && Bytes < CopyScale) {
    CopyOpc = ARM::tMOVr;
    CopyScale = 1;
    CopyNeedsCC = false;
    CopyRange = 0;
  }
  unsigned ExtraRange = ((1 << ExtraBits) - 1) * ExtraScale; // per instruction
  unsigned RequiredCopyInstrs = CopyOpc ? 1 : 0;
  unsigned RangeAfterCopy = (CopyRange > Bytes) ? 0 : (Bytes - CopyRange);

  // We could handle this case when the copy instruction does not require an
  // aligned immediate, but we do not currently do this.
  assert(RangeAfterCopy % ExtraScale == 0 &&
         "Extra instruction requires immediate to be aligned");

  // Count how many in-place instructions are needed to cover the remainder.
  unsigned RequiredExtraInstrs;
  if (ExtraRange)
    RequiredExtraInstrs = alignTo(RangeAfterCopy, ExtraRange) / ExtraRange;
  else if (RangeAfterCopy > 0)
    // We need an extra instruction but none is available
    RequiredExtraInstrs = 1000000;
  else
    RequiredExtraInstrs = 0;
  unsigned RequiredInstrs = RequiredCopyInstrs + RequiredExtraInstrs;
  // SP adjustments are given one extra instruction of budget before we
  // fall back to the constant-pool path.
  unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;

  // Use a constant pool, if the sequence of ADDs/SUBs is too expensive.
  if (RequiredInstrs > Threshold) {
    emitThumbRegPlusImmInReg(MBB, MBBI, dl,
                             DestReg, BaseReg, NumBytes, true,
                             TII, MRI, MIFlags);
    return;
  }

  // Emit zero or one copy instructions
  if (CopyOpc) {
    unsigned CopyImm = std::min(Bytes, CopyRange) / CopyScale;
    Bytes -= CopyImm * CopyScale;

    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(CopyOpc), DestReg);
    if (CopyNeedsCC)
      MIB = AddDefaultT1CC(MIB);
    MIB.addReg(BaseReg, RegState::Kill);
    // tMOVr has no immediate operand; all other copy forms take one.
    if (CopyOpc != ARM::tMOVr) {
      MIB.addImm(CopyImm);
    }
    AddDefaultPred(MIB.setMIFlags(MIFlags));

    // Remaining adds/subs are relative to what we just produced.
    BaseReg = DestReg;
  }

  // Emit zero or more in-place add/sub instructions
  while (Bytes) {
    unsigned ExtraImm = std::min(Bytes, ExtraRange) / ExtraScale;
    Bytes -= ExtraImm * ExtraScale;

    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg);
    if (ExtraNeedsCC)
      MIB = AddDefaultT1CC(MIB);
    MIB.addReg(BaseReg).addImm(ExtraImm);
    MIB = AddDefaultPred(MIB);
    MIB.setMIFlags(MIFlags);
  }
}
333 
334 static void removeOperands(MachineInstr &MI, unsigned i) {
335   unsigned Op = i;
336   for (unsigned e = MI.getNumOperands(); i != e; ++i)
337     MI.RemoveOperand(Op);
338 }
339 
340 /// convertToNonSPOpcode - Change the opcode to the non-SP version, because
341 /// we're replacing the frame index with a non-SP register.
342 static unsigned convertToNonSPOpcode(unsigned Opcode) {
343   switch (Opcode) {
344   case ARM::tLDRspi:
345     return ARM::tLDRi;
346 
347   case ARM::tSTRspi:
348     return ARM::tSTRi;
349   }
350 
351   return Opcode;
352 }
353 
/// rewriteFrameIndex - Rewrite MI's operand at FrameRegIdx to use FrameReg
/// plus an encodable immediate. On return, Offset holds whatever part of the
/// offset could not be folded into the instruction. Returns true when the
/// rewrite is complete (the offset fit, or the tADDframe pseudo was expanded
/// and erased); returns Offset == 0 otherwise, so callers know whether they
/// still need to materialize a residual offset.
bool ThumbRegisterInfo::rewriteFrameIndex(MachineBasicBlock::iterator II,
                                          unsigned FrameRegIdx,
                                          unsigned FrameReg, int &Offset,
                                          const ARMBaseInstrInfo &TII) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  assert(MBB.getParent()->getSubtarget<ARMSubtarget>().isThumb1Only() &&
         "This isn't needed for thumb2!");
  DebugLoc dl = MI.getDebugLoc();
  // NOTE(review): MIB is not referenced below — candidate for removal.
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);

  if (Opcode == ARM::tADDframe) {
    // Fold the pseudo's own immediate into Offset, expand into a real
    // add-of-immediate sequence, and delete the pseudo.
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    unsigned DestReg = MI.getOperand(0).getReg();

    emitThumbRegPlusImmediate(MBB, II, dl, DestReg, FrameReg, Offset, TII,
                              *this);
    MBB.erase(II);
    return true;
  } else {
    if (AddrMode != ARMII::AddrModeT1_s)
      llvm_unreachable("Unsupported addressing mode!");

    // AddrModeT1_s immediates are scaled by 4; SP-relative forms have an
    // 8-bit field, the others a 5-bit field.
    unsigned ImmIdx = FrameRegIdx + 1;
    int InstrOffs = MI.getOperand(ImmIdx).getImm();
    unsigned NumBits = (FrameReg == ARM::SP) ? 8 : 5;
    unsigned Scale = 4;

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale - 1)) == 0 && "Can't encode this offset!");

    // Common case: small offset, fits into instruction.
    MachineOperand &ImmOp = MI.getOperand(ImmIdx);
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;

    if ((unsigned)Offset <= Mask * Scale) {
      // Replace the FrameIndex with the frame register (e.g., sp).
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      ImmOp.ChangeToImmediate(ImmedOffset);

      // If we're using a register where sp was stored, convert the instruction
      // to the non-SP version.
      unsigned NewOpc = convertToNonSPOpcode(Opcode);
      if (NewOpc != Opcode && FrameReg != ARM::SP)
        MI.setDesc(TII.get(NewOpc));

      return true;
    }

    // Offset didn't fit; from here on assume the narrower 5-bit field.
    NumBits = 5;
    Mask = (1 << NumBits) - 1;

    // If this is a thumb spill / restore, we will be using a constpool load to
    // materialize the offset.
    if (Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
      ImmOp.ChangeToImmediate(0);
    } else {
      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask * Scale);
    }
  }

  return Offset == 0;
}
424 
425 void ThumbRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
426                                            int64_t Offset) const {
427   const MachineFunction &MF = *MI.getParent()->getParent();
428   const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
429   if (!STI.isThumb1Only())
430     return ARMBaseRegisterInfo::resolveFrameIndex(MI, BaseReg, Offset);
431 
432   const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
433   int Off = Offset; // ARM doesn't need the general 64-bit offsets
434   unsigned i = 0;
435 
436   while (!MI.getOperand(i).isFI()) {
437     ++i;
438     assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
439   }
440   bool Done = rewriteFrameIndex(MI, i, BaseReg, Off, TII);
441   assert (Done && "Unable to resolve frame index!");
442   (void)Done;
443 }
444 
/// saveScavengerRegister - Spill the register so it can be used by the
/// register scavenger. Return true.
bool ThumbRegisterInfo::saveScavengerRegister(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator &UseMI, const TargetRegisterClass *RC,
    unsigned Reg) const {

  const ARMSubtarget &STI = MBB.getParent()->getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::saveScavengerRegister(MBB, I, UseMI, RC, Reg);

  // Thumb1 can't use the emergency spill slot on the stack because
  // ldr/str immediate offsets must be positive, and if we're referencing
  // off the frame pointer (if, for example, there are alloca() calls in
  // the function), the offset will be negative. Use R12 instead since that's
  // a call clobbered register that we know won't be used in Thumb1 mode.
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL;
  // Park the scavenged register's current value in R12.
  AddDefaultPred(BuildMI(MBB, I, DL, TII.get(ARM::tMOVr))
    .addReg(ARM::R12, RegState::Define)
    .addReg(Reg, RegState::Kill));

  // The UseMI is where we would like to restore the register. If there's
  // interference with R12 before then, however, we'll need to restore it
  // before that instead and adjust the UseMI.
  bool done = false;
  for (MachineBasicBlock::iterator II = I; !done && II != UseMI ; ++II) {
    // Debug values don't touch registers; skip them.
    if (II->isDebugValue())
      continue;
    // If this instruction affects R12, adjust our restore point.
    for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = II->getOperand(i);
      // A regmask (e.g. from a call) that clobbers R12 forces an earlier
      // restore.
      if (MO.isRegMask() && MO.clobbersPhysReg(ARM::R12)) {
        UseMI = II;
        done = true;
        break;
      }
      // Ignore non-register, undef, unassigned, and virtual operands.
      if (!MO.isReg() || MO.isUndef() || !MO.getReg() ||
          TargetRegisterInfo::isVirtualRegister(MO.getReg()))
        continue;
      // Any explicit use or def of R12 also forces an earlier restore.
      if (MO.getReg() == ARM::R12) {
        UseMI = II;
        done = true;
        break;
      }
    }
  }
  // Restore the register from R12
  AddDefaultPred(BuildMI(MBB, UseMI, DL, TII.get(ARM::tMOVr)).
    addReg(Reg, RegState::Define).addReg(ARM::R12, RegState::Kill));

  return true;
}
498 
/// eliminateFrameIndex - Replace the abstract frame index operand of MI with
/// a concrete register + offset. Thumb2/ARM defer to the base class; the
/// Thumb1 path below folds as much of the offset as the instruction can
/// encode and emits extra instructions for the remainder.
void ThumbRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::eliminateFrameIndex(II, SPAdj, FIOperandNum,
                                                    RS);

  unsigned VReg = 0;
  const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  DebugLoc dl = MI.getDebugLoc();
  // MIB wraps MI itself; used at the end to re-append default predicate
  // operands after they were stripped.
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);

  // Start from the SP-relative offset of the frame object.
  unsigned FrameReg = ARM::SP;
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  int Offset = MF.getFrameInfo().getObjectOffset(FrameIndex) +
               MF.getFrameInfo().getStackSize() + SPAdj;

  if (MF.getFrameInfo().hasVarSizedObjects()) {
    assert(SPAdj == 0 && STI.getFrameLowering()->hasFP(MF) && "Unexpected");
    // There are alloca()'s in this function, must reference off the frame
    // pointer or base pointer instead.
    if (!hasBasePointer(MF)) {
      FrameReg = getFrameRegister(MF);
      Offset -= AFI->getFramePtrSpillOffset();
    } else
      FrameReg = BasePtr;
  }

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated.  That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){
    assert(STI.getFrameLowering()->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(FIOperandNum).  ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  assert(AFI->isThumbFunction() &&
         "This eliminateFrameIndex only supports Thumb1!");
  if (rewriteFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return;

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert(Offset && "This code isn't needed if offset already handled!");

  unsigned Opcode = MI.getOpcode();

  // Remove predicate first.
  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx != -1)
    removeOperands(MI, PIdx);

  if (MI.mayLoad()) {
    // Use the destination register to materialize sp + offset.
    unsigned TmpReg = MI.getOperand(0).getReg();
    bool UseRR = false;
    if (Opcode == ARM::tLDRspi) {
      if (FrameReg == ARM::SP || STI.genExecuteOnly())
        // Compute FrameReg + Offset into TmpReg directly.
        emitThumbRegPlusImmInReg(MBB, II, dl, TmpReg, FrameReg,
                                 Offset, false, TII, *this);
      else {
        // Load the offset from a constant pool and switch to the
        // register+register addressing form.
        emitLoadConstPool(MBB, II, dl, TmpReg, 0, Offset);
        UseRR = true;
      }
    } else {
      emitThumbRegPlusImmediate(MBB, II, dl, TmpReg, FrameReg, Offset, TII,
                                *this);
    }

    MI.setDesc(TII.get(UseRR ? ARM::tLDRr : ARM::tLDRi));
    MI.getOperand(FIOperandNum).ChangeToRegister(TmpReg, false, false, true);
    if (UseRR)
      // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
      // register. The offset is already handled in the vreg value.
      MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                     false);
  } else if (MI.mayStore()) {
      // Stores have no free destination register, so use a fresh virtual
      // low register for the address computation.
      VReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
      bool UseRR = false;

      if (Opcode == ARM::tSTRspi) {
        if (FrameReg == ARM::SP || STI.genExecuteOnly())
          emitThumbRegPlusImmInReg(MBB, II, dl, VReg, FrameReg,
                                   Offset, false, TII, *this);
        else {
          emitLoadConstPool(MBB, II, dl, VReg, 0, Offset);
          UseRR = true;
        }
      } else
        emitThumbRegPlusImmediate(MBB, II, dl, VReg, FrameReg, Offset, TII,
                                  *this);
      MI.setDesc(TII.get(UseRR ? ARM::tSTRr : ARM::tSTRi));
      MI.getOperand(FIOperandNum).ChangeToRegister(VReg, false, false, true);
      if (UseRR)
        // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
        // register. The offset is already handled in the vreg value.
        MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                       false);
  } else {
    llvm_unreachable("Unexpected opcode!");
  }

  // Add predicate back if it's needed.
  if (MI.isPredicable())
    AddDefaultPred(MIB);
}
626