1 //===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the Base ARM implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "ARMBaseInstrInfo.h"
14 #include "ARMBaseRegisterInfo.h"
15 #include "ARMConstantPoolValue.h"
16 #include "ARMFeatures.h"
17 #include "ARMHazardRecognizer.h"
18 #include "ARMMachineFunctionInfo.h"
19 #include "ARMSubtarget.h"
20 #include "MCTargetDesc/ARMAddressingModes.h"
21 #include "MCTargetDesc/ARMBaseInfo.h"
22 #include "MVETailPredUtils.h"
23 #include "llvm/ADT/DenseMap.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SmallSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/Triple.h"
28 #include "llvm/CodeGen/LiveVariables.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineConstantPool.h"
31 #include "llvm/CodeGen/MachineFrameInfo.h"
32 #include "llvm/CodeGen/MachineFunction.h"
33 #include "llvm/CodeGen/MachineInstr.h"
34 #include "llvm/CodeGen/MachineInstrBuilder.h"
35 #include "llvm/CodeGen/MachineMemOperand.h"
36 #include "llvm/CodeGen/MachineModuleInfo.h"
37 #include "llvm/CodeGen/MachineOperand.h"
38 #include "llvm/CodeGen/MachineRegisterInfo.h"
39 #include "llvm/CodeGen/MachineScheduler.h"
40 #include "llvm/CodeGen/MultiHazardRecognizer.h"
41 #include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
42 #include "llvm/CodeGen/SelectionDAGNodes.h"
43 #include "llvm/CodeGen/TargetInstrInfo.h"
44 #include "llvm/CodeGen/TargetRegisterInfo.h"
45 #include "llvm/CodeGen/TargetSchedule.h"
46 #include "llvm/IR/Attributes.h"
47 #include "llvm/IR/Constants.h"
48 #include "llvm/IR/DebugLoc.h"
49 #include "llvm/IR/Function.h"
50 #include "llvm/IR/GlobalValue.h"
51 #include "llvm/MC/MCAsmInfo.h"
52 #include "llvm/MC/MCInstrDesc.h"
53 #include "llvm/MC/MCInstrItineraries.h"
54 #include "llvm/Support/BranchProbability.h"
55 #include "llvm/Support/Casting.h"
56 #include "llvm/Support/CommandLine.h"
57 #include "llvm/Support/Compiler.h"
58 #include "llvm/Support/Debug.h"
59 #include "llvm/Support/ErrorHandling.h"
60 #include "llvm/Support/raw_ostream.h"
61 #include "llvm/Target/TargetMachine.h"
62 #include <algorithm>
63 #include <cassert>
64 #include <cstdint>
65 #include <iterator>
66 #include <new>
67 #include <utility>
68 #include <vector>
69 
70 using namespace llvm;
71 
72 #define DEBUG_TYPE "arm-instrinfo"
73 
74 #define GET_INSTRINFO_CTOR_DTOR
75 #include "ARMGenInstrInfo.inc"
76 
77 static cl::opt<bool>
78 EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
79                cl::desc("Enable ARM 2-addr to 3-addr conv"));
80 
81 /// ARM_MLxEntry - Record information about MLA / MLS instructions.
82 struct ARM_MLxEntry {
83   uint16_t MLxOpc;     // MLA / MLS opcode
84   uint16_t MulOpc;     // Expanded multiplication opcode
85   uint16_t AddSubOpc;  // Expanded add / sub opcode
86   bool NegAcc;         // True if the acc is negated before the add / sub.
87   bool HasLane;        // True if instruction has an extra "lane" operand.
88 };
89 
90 static const ARM_MLxEntry ARM_MLxTable[] = {
91   // MLxOpc,          MulOpc,           AddSubOpc,       NegAcc, HasLane
92   // fp scalar ops
93   { ARM::VMLAS,       ARM::VMULS,       ARM::VADDS,      false,  false },
94   { ARM::VMLSS,       ARM::VMULS,       ARM::VSUBS,      false,  false },
95   { ARM::VMLAD,       ARM::VMULD,       ARM::VADDD,      false,  false },
96   { ARM::VMLSD,       ARM::VMULD,       ARM::VSUBD,      false,  false },
97   { ARM::VNMLAS,      ARM::VNMULS,      ARM::VSUBS,      true,   false },
98   { ARM::VNMLSS,      ARM::VMULS,       ARM::VSUBS,      true,   false },
99   { ARM::VNMLAD,      ARM::VNMULD,      ARM::VSUBD,      true,   false },
100   { ARM::VNMLSD,      ARM::VMULD,       ARM::VSUBD,      true,   false },
101 
102   // fp SIMD ops
103   { ARM::VMLAfd,      ARM::VMULfd,      ARM::VADDfd,     false,  false },
104   { ARM::VMLSfd,      ARM::VMULfd,      ARM::VSUBfd,     false,  false },
105   { ARM::VMLAfq,      ARM::VMULfq,      ARM::VADDfq,     false,  false },
106   { ARM::VMLSfq,      ARM::VMULfq,      ARM::VSUBfq,     false,  false },
107   { ARM::VMLAslfd,    ARM::VMULslfd,    ARM::VADDfd,     false,  true  },
108   { ARM::VMLSslfd,    ARM::VMULslfd,    ARM::VSUBfd,     false,  true  },
109   { ARM::VMLAslfq,    ARM::VMULslfq,    ARM::VADDfq,     false,  true  },
110   { ARM::VMLSslfq,    ARM::VMULslfq,    ARM::VSUBfq,     false,  true  },
111 };
112 
113 ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
114   : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
115     Subtarget(STI) {
116   for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
117     if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
118       llvm_unreachable("Duplicated entries?");
119     MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
120     MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
121   }
122 }
123 
124 // Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl
125 // currently defaults to no prepass hazard recognizer.
126 ScheduleHazardRecognizer *
127 ARMBaseInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
128                                                const ScheduleDAG *DAG) const {
129   if (usePreRAHazardRecognizer()) {
130     const InstrItineraryData *II =
131         static_cast<const ARMSubtarget *>(STI)->getInstrItineraryData();
132     return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
133   }
134   return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
135 }
136 
137 // Called during:
138 // - pre-RA scheduling
139 // - post-RA scheduling when FeatureUseMISched is set
140 ScheduleHazardRecognizer *ARMBaseInstrInfo::CreateTargetMIHazardRecognizer(
141     const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
142   MultiHazardRecognizer *MHR = new MultiHazardRecognizer();
143 
144   // We would like to restrict this hazard recognizer to only
145   // post-RA scheduling; we can tell that we're post-RA because we don't
146   // track VRegLiveness.
147   // Cortex-M7: TRM indicates that there is a single ITCM bank and two DTCM
148   //            banks banked on bit 2.  Assume that TCMs are in use.
149   if (Subtarget.isCortexM7() && !DAG->hasVRegLiveness())
150     MHR->AddHazardRecognizer(
151         std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4, true));
152 
153   // Not inserting ARMHazardRecognizerFPMLx because that would change
154   // legacy behavior
155 
156   auto BHR = TargetInstrInfo::CreateTargetMIHazardRecognizer(II, DAG);
157   MHR->AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer>(BHR));
158   return MHR;
159 }
160 
161 // Called during post-RA scheduling when FeatureUseMISched is not set
162 ScheduleHazardRecognizer *ARMBaseInstrInfo::
163 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
164                                    const ScheduleDAG *DAG) const {
165   MultiHazardRecognizer *MHR = new MultiHazardRecognizer();
166 
167   if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
168     MHR->AddHazardRecognizer(std::make_unique<ARMHazardRecognizerFPMLx>());
169 
170   auto BHR = TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
171   if (BHR)
172     MHR->AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer>(BHR));
173   return MHR;
174 }
175 
176 MachineInstr *
177 ARMBaseInstrInfo::convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
178                                         LiveIntervals *LIS) const {
179   // FIXME: Thumb2 support.
180 
181   if (!EnableARM3Addr)
182     return nullptr;
183 
184   MachineFunction &MF = *MI.getParent()->getParent();
185   uint64_t TSFlags = MI.getDesc().TSFlags;
186   bool isPre = false;
187   switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
188   default: return nullptr;
189   case ARMII::IndexModePre:
190     isPre = true;
191     break;
192   case ARMII::IndexModePost:
193     break;
194   }
195 
196   // Try splitting an indexed load/store to an un-indexed one plus an add/sub
197   // operation.
198   unsigned MemOpc = getUnindexedOpcode(MI.getOpcode());
199   if (MemOpc == 0)
200     return nullptr;
201 
202   MachineInstr *UpdateMI = nullptr;
203   MachineInstr *MemMI = nullptr;
204   unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
205   const MCInstrDesc &MCID = MI.getDesc();
206   unsigned NumOps = MCID.getNumOperands();
207   bool isLoad = !MI.mayStore();
208   const MachineOperand &WB = isLoad ? MI.getOperand(1) : MI.getOperand(0);
209   const MachineOperand &Base = MI.getOperand(2);
210   const MachineOperand &Offset = MI.getOperand(NumOps - 3);
211   Register WBReg = WB.getReg();
212   Register BaseReg = Base.getReg();
213   Register OffReg = Offset.getReg();
214   unsigned OffImm = MI.getOperand(NumOps - 2).getImm();
215   ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI.getOperand(NumOps - 1).getImm();
216   switch (AddrMode) {
217   default: llvm_unreachable("Unknown indexed op!");
218   case ARMII::AddrMode2: {
219     bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
220     unsigned Amt = ARM_AM::getAM2Offset(OffImm);
221     if (OffReg == 0) {
222       if (ARM_AM::getSOImmVal(Amt) == -1)
223         // Can't encode it in a so_imm operand. This transformation will
224         // add more than 1 instruction. Abandon!
225         return nullptr;
226       UpdateMI = BuildMI(MF, MI.getDebugLoc(),
227                          get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
228                      .addReg(BaseReg)
229                      .addImm(Amt)
230                      .add(predOps(Pred))
231                      .add(condCodeOp());
232     } else if (Amt != 0) {
233       ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
234       unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
235       UpdateMI = BuildMI(MF, MI.getDebugLoc(),
236                          get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
237                      .addReg(BaseReg)
238                      .addReg(OffReg)
239                      .addReg(0)
240                      .addImm(SOOpc)
241                      .add(predOps(Pred))
242                      .add(condCodeOp());
243     } else
244       UpdateMI = BuildMI(MF, MI.getDebugLoc(),
245                          get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
246                      .addReg(BaseReg)
247                      .addReg(OffReg)
248                      .add(predOps(Pred))
249                      .add(condCodeOp());
250     break;
251   }
252   case ARMII::AddrMode3 : {
253     bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
254     unsigned Amt = ARM_AM::getAM3Offset(OffImm);
255     if (OffReg == 0)
256       // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
257       UpdateMI = BuildMI(MF, MI.getDebugLoc(),
258                          get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
259                      .addReg(BaseReg)
260                      .addImm(Amt)
261                      .add(predOps(Pred))
262                      .add(condCodeOp());
263     else
264       UpdateMI = BuildMI(MF, MI.getDebugLoc(),
265                          get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
266                      .addReg(BaseReg)
267                      .addReg(OffReg)
268                      .add(predOps(Pred))
269                      .add(condCodeOp());
270     break;
271   }
272   }
273 
274   std::vector<MachineInstr*> NewMIs;
275   if (isPre) {
276     if (isLoad)
277       MemMI =
278           BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
279               .addReg(WBReg)
280               .addImm(0)
281               .addImm(Pred);
282     else
283       MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
284                   .addReg(MI.getOperand(1).getReg())
285                   .addReg(WBReg)
286                   .addReg(0)
287                   .addImm(0)
288                   .addImm(Pred);
289     NewMIs.push_back(MemMI);
290     NewMIs.push_back(UpdateMI);
291   } else {
292     if (isLoad)
293       MemMI =
294           BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
295               .addReg(BaseReg)
296               .addImm(0)
297               .addImm(Pred);
298     else
299       MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
300                   .addReg(MI.getOperand(1).getReg())
301                   .addReg(BaseReg)
302                   .addReg(0)
303                   .addImm(0)
304                   .addImm(Pred);
305     if (WB.isDead())
306       UpdateMI->getOperand(0).setIsDead();
307     NewMIs.push_back(UpdateMI);
308     NewMIs.push_back(MemMI);
309   }
310 
311   // Transfer LiveVariables states, kill / dead info.
312   if (LV) {
313     for (const MachineOperand &MO : MI.operands()) {
314       if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
315         Register Reg = MO.getReg();
316 
317         LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
318         if (MO.isDef()) {
319           MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
320           if (MO.isDead())
321             LV->addVirtualRegisterDead(Reg, *NewMI);
322         }
323         if (MO.isUse() && MO.isKill()) {
324           for (unsigned j = 0; j < 2; ++j) {
325             // Look at the two new MI's in reverse order.
326             MachineInstr *NewMI = NewMIs[j];
327             if (!NewMI->readsRegister(Reg))
328               continue;
329             LV->addVirtualRegisterKilled(Reg, *NewMI);
330             if (VI.removeKill(MI))
331               VI.Kills.push_back(NewMI);
332             break;
333           }
334         }
335       }
336     }
337   }
338 
339   MachineBasicBlock &MBB = *MI.getParent();
340   MBB.insert(MI, NewMIs[1]);
341   MBB.insert(MI, NewMIs[0]);
342   return NewMIs[0];
343 }
344 
345 // Branch analysis.
346 bool ARMBaseInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
347                                      MachineBasicBlock *&TBB,
348                                      MachineBasicBlock *&FBB,
349                                      SmallVectorImpl<MachineOperand> &Cond,
350                                      bool AllowModify) const {
351   TBB = nullptr;
352   FBB = nullptr;
353 
354   MachineBasicBlock::instr_iterator I = MBB.instr_end();
355   if (I == MBB.instr_begin())
356     return false; // Empty blocks are easy.
357   --I;
358 
359   // Walk backwards from the end of the basic block until the branch is
360   // analyzed or we give up.
361   while (isPredicated(*I) || I->isTerminator() || I->isDebugValue()) {
362     // Flag to be raised on unanalyzeable instructions. This is useful in cases
363     // where we want to clean up on the end of the basic block before we bail
364     // out.
365     bool CantAnalyze = false;
366 
367     // Skip over DEBUG values, predicated nonterminators and speculation
368     // barrier terminators.
369     while (I->isDebugInstr() || !I->isTerminator() ||
370            isSpeculationBarrierEndBBOpcode(I->getOpcode()) ||
371            I->getOpcode() == ARM::t2DoLoopStartTP){
372       if (I == MBB.instr_begin())
373         return false;
374       --I;
375     }
376 
377     if (isIndirectBranchOpcode(I->getOpcode()) ||
378         isJumpTableBranchOpcode(I->getOpcode())) {
379       // Indirect branches and jump tables can't be analyzed, but we still want
380       // to clean up any instructions at the tail of the basic block.
381       CantAnalyze = true;
382     } else if (isUncondBranchOpcode(I->getOpcode())) {
383       TBB = I->getOperand(0).getMBB();
384     } else if (isCondBranchOpcode(I->getOpcode())) {
385       // Bail out if we encounter multiple conditional branches.
386       if (!Cond.empty())
387         return true;
388 
389       assert(!FBB && "FBB should have been null.");
390       FBB = TBB;
391       TBB = I->getOperand(0).getMBB();
392       Cond.push_back(I->getOperand(1));
393       Cond.push_back(I->getOperand(2));
394     } else if (I->isReturn()) {
395       // Returns can't be analyzed, but we should run cleanup.
396       CantAnalyze = true;
397     } else {
398       // We encountered other unrecognized terminator. Bail out immediately.
399       return true;
400     }
401 
402     // Cleanup code - to be run for unpredicated unconditional branches and
403     //                returns.
404     if (!isPredicated(*I) &&
405           (isUncondBranchOpcode(I->getOpcode()) ||
406            isIndirectBranchOpcode(I->getOpcode()) ||
407            isJumpTableBranchOpcode(I->getOpcode()) ||
408            I->isReturn())) {
409       // Forget any previous condition branch information - it no longer applies.
410       Cond.clear();
411       FBB = nullptr;
412 
413       // If we can modify the function, delete everything below this
414       // unconditional branch.
415       if (AllowModify) {
416         MachineBasicBlock::iterator DI = std::next(I);
417         while (DI != MBB.instr_end()) {
418           MachineInstr &InstToDelete = *DI;
419           ++DI;
420           // Speculation barriers must not be deleted.
421           if (isSpeculationBarrierEndBBOpcode(InstToDelete.getOpcode()))
422             continue;
423           InstToDelete.eraseFromParent();
424         }
425       }
426     }
427 
428     if (CantAnalyze) {
429       // We may not be able to analyze the block, but we could still have
430       // an unconditional branch as the last instruction in the block, which
431       // just branches to layout successor. If this is the case, then just
432       // remove it if we're allowed to make modifications.
433       if (AllowModify && !isPredicated(MBB.back()) &&
434           isUncondBranchOpcode(MBB.back().getOpcode()) &&
435           TBB && MBB.isLayoutSuccessor(TBB))
436         removeBranch(MBB);
437       return true;
438     }
439 
440     if (I == MBB.instr_begin())
441       return false;
442 
443     --I;
444   }
445 
446   // We made it past the terminators without bailing out - we must have
447   // analyzed this branch successfully.
448   return false;
449 }
450 
451 unsigned ARMBaseInstrInfo::removeBranch(MachineBasicBlock &MBB,
452                                         int *BytesRemoved) const {
453   assert(!BytesRemoved && "code size not handled");
454 
455   MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
456   if (I == MBB.end())
457     return 0;
458 
459   if (!isUncondBranchOpcode(I->getOpcode()) &&
460       !isCondBranchOpcode(I->getOpcode()))
461     return 0;
462 
463   // Remove the branch.
464   I->eraseFromParent();
465 
466   I = MBB.end();
467 
468   if (I == MBB.begin()) return 1;
469   --I;
470   if (!isCondBranchOpcode(I->getOpcode()))
471     return 1;
472 
473   // Remove the branch.
474   I->eraseFromParent();
475   return 2;
476 }
477 
478 unsigned ARMBaseInstrInfo::insertBranch(MachineBasicBlock &MBB,
479                                         MachineBasicBlock *TBB,
480                                         MachineBasicBlock *FBB,
481                                         ArrayRef<MachineOperand> Cond,
482                                         const DebugLoc &DL,
483                                         int *BytesAdded) const {
484   assert(!BytesAdded && "code size not handled");
485   ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
486   int BOpc   = !AFI->isThumbFunction()
487     ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
488   int BccOpc = !AFI->isThumbFunction()
489     ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
490   bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();
491 
492   // Shouldn't be a fall through.
493   assert(TBB && "insertBranch must not be told to insert a fallthrough");
494   assert((Cond.size() == 2 || Cond.size() == 0) &&
495          "ARM branch conditions have two components!");
496 
497   // For conditional branches, we use addOperand to preserve CPSR flags.
498 
499   if (!FBB) {
500     if (Cond.empty()) { // Unconditional branch?
501       if (isThumb)
502         BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).add(predOps(ARMCC::AL));
503       else
504         BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
505     } else
506       BuildMI(&MBB, DL, get(BccOpc))
507           .addMBB(TBB)
508           .addImm(Cond[0].getImm())
509           .add(Cond[1]);
510     return 1;
511   }
512 
513   // Two-way conditional branch.
514   BuildMI(&MBB, DL, get(BccOpc))
515       .addMBB(TBB)
516       .addImm(Cond[0].getImm())
517       .add(Cond[1]);
518   if (isThumb)
519     BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).add(predOps(ARMCC::AL));
520   else
521     BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
522   return 2;
523 }
524 
525 bool ARMBaseInstrInfo::
526 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
527   ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
528   Cond[0].setImm(ARMCC::getOppositeCondition(CC));
529   return false;
530 }
531 
532 bool ARMBaseInstrInfo::isPredicated(const MachineInstr &MI) const {
533   if (MI.isBundle()) {
534     MachineBasicBlock::const_instr_iterator I = MI.getIterator();
535     MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
536     while (++I != E && I->isInsideBundle()) {
537       int PIdx = I->findFirstPredOperandIdx();
538       if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
539         return true;
540     }
541     return false;
542   }
543 
544   int PIdx = MI.findFirstPredOperandIdx();
545   return PIdx != -1 && MI.getOperand(PIdx).getImm() != ARMCC::AL;
546 }
547 
548 std::string ARMBaseInstrInfo::createMIROperandComment(
549     const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
550     const TargetRegisterInfo *TRI) const {
551 
552   // First, let's see if there is a generic comment for this operand
553   std::string GenericComment =
554       TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
555   if (!GenericComment.empty())
556     return GenericComment;
557 
558   // If not, check if we have an immediate operand.
559   if (Op.getType() != MachineOperand::MO_Immediate)
560     return std::string();
561 
562   // And print its corresponding condition code if the immediate is a
563   // predicate.
564   int FirstPredOp = MI.findFirstPredOperandIdx();
565   if (FirstPredOp != (int) OpIdx)
566     return std::string();
567 
568   std::string CC = "CC::";
569   CC += ARMCondCodeToString((ARMCC::CondCodes)Op.getImm());
570   return CC;
571 }
572 
573 bool ARMBaseInstrInfo::PredicateInstruction(
574     MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
575   unsigned Opc = MI.getOpcode();
576   if (isUncondBranchOpcode(Opc)) {
577     MI.setDesc(get(getMatchingCondBranchOpcode(Opc)));
578     MachineInstrBuilder(*MI.getParent()->getParent(), MI)
579       .addImm(Pred[0].getImm())
580       .addReg(Pred[1].getReg());
581     return true;
582   }
583 
584   int PIdx = MI.findFirstPredOperandIdx();
585   if (PIdx != -1) {
586     MachineOperand &PMO = MI.getOperand(PIdx);
587     PMO.setImm(Pred[0].getImm());
588     MI.getOperand(PIdx+1).setReg(Pred[1].getReg());
589 
590     // Thumb 1 arithmetic instructions do not set CPSR when executed inside an
591     // IT block. This affects how they are printed.
592     const MCInstrDesc &MCID = MI.getDesc();
593     if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
594       assert(MCID.OpInfo[1].isOptionalDef() && "CPSR def isn't expected operand");
595       assert((MI.getOperand(1).isDead() ||
596               MI.getOperand(1).getReg() != ARM::CPSR) &&
597              "if conversion tried to stop defining used CPSR");
598       MI.getOperand(1).setReg(ARM::NoRegister);
599     }
600 
601     return true;
602   }
603   return false;
604 }
605 
606 bool ARMBaseInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
607                                          ArrayRef<MachineOperand> Pred2) const {
608   if (Pred1.size() > 2 || Pred2.size() > 2)
609     return false;
610 
611   ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
612   ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
613   if (CC1 == CC2)
614     return true;
615 
616   switch (CC1) {
617   default:
618     return false;
619   case ARMCC::AL:
620     return true;
621   case ARMCC::HS:
622     return CC2 == ARMCC::HI;
623   case ARMCC::LS:
624     return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
625   case ARMCC::GE:
626     return CC2 == ARMCC::GT;
627   case ARMCC::LE:
628     return CC2 == ARMCC::LT;
629   }
630 }
631 
632 bool ARMBaseInstrInfo::ClobbersPredicate(MachineInstr &MI,
633                                          std::vector<MachineOperand> &Pred,
634                                          bool SkipDead) const {
635   bool Found = false;
636   for (const MachineOperand &MO : MI.operands()) {
637     bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
638     bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
639     if (ClobbersCPSR || IsCPSR) {
640 
641       // Filter out T1 instructions that have a dead CPSR,
642       // allowing IT blocks to be generated containing T1 instructions
643       const MCInstrDesc &MCID = MI.getDesc();
644       if (MCID.TSFlags & ARMII::ThumbArithFlagSetting && MO.isDead() &&
645           SkipDead)
646         continue;
647 
648       Pred.push_back(MO);
649       Found = true;
650     }
651   }
652 
653   return Found;
654 }
655 
656 bool ARMBaseInstrInfo::isCPSRDefined(const MachineInstr &MI) {
657   for (const auto &MO : MI.operands())
658     if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
659       return true;
660   return false;
661 }
662 
663 static bool isEligibleForITBlock(const MachineInstr *MI) {
664   switch (MI->getOpcode()) {
665   default: return true;
666   case ARM::tADC:   // ADC (register) T1
667   case ARM::tADDi3: // ADD (immediate) T1
668   case ARM::tADDi8: // ADD (immediate) T2
669   case ARM::tADDrr: // ADD (register) T1
670   case ARM::tAND:   // AND (register) T1
671   case ARM::tASRri: // ASR (immediate) T1
672   case ARM::tASRrr: // ASR (register) T1
673   case ARM::tBIC:   // BIC (register) T1
674   case ARM::tEOR:   // EOR (register) T1
675   case ARM::tLSLri: // LSL (immediate) T1
676   case ARM::tLSLrr: // LSL (register) T1
677   case ARM::tLSRri: // LSR (immediate) T1
678   case ARM::tLSRrr: // LSR (register) T1
679   case ARM::tMUL:   // MUL T1
680   case ARM::tMVN:   // MVN (register) T1
681   case ARM::tORR:   // ORR (register) T1
682   case ARM::tROR:   // ROR (register) T1
683   case ARM::tRSB:   // RSB (immediate) T1
684   case ARM::tSBC:   // SBC (register) T1
685   case ARM::tSUBi3: // SUB (immediate) T1
686   case ARM::tSUBi8: // SUB (immediate) T2
687   case ARM::tSUBrr: // SUB (register) T1
688     return !ARMBaseInstrInfo::isCPSRDefined(*MI);
689   }
690 }
691 
692 /// isPredicable - Return true if the specified instruction can be predicated.
693 /// By default, this returns true for every instruction with a
694 /// PredicateOperand.
695 bool ARMBaseInstrInfo::isPredicable(const MachineInstr &MI) const {
696   if (!MI.isPredicable())
697     return false;
698 
699   if (MI.isBundle())
700     return false;
701 
702   if (!isEligibleForITBlock(&MI))
703     return false;
704 
705   const MachineFunction *MF = MI.getParent()->getParent();
706   const ARMFunctionInfo *AFI =
707       MF->getInfo<ARMFunctionInfo>();
708 
709   // Neon instructions in Thumb2 IT blocks are deprecated, see ARMARM.
710   // In their ARM encoding, they can't be encoded in a conditional form.
711   if ((MI.getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
712     return false;
713 
714   // Make indirect control flow changes unpredicable when SLS mitigation is
715   // enabled.
716   const ARMSubtarget &ST = MF->getSubtarget<ARMSubtarget>();
717   if (ST.hardenSlsRetBr() && isIndirectControlFlowNotComingBack(MI))
718     return false;
719   if (ST.hardenSlsBlr() && isIndirectCall(MI))
720     return false;
721 
722   if (AFI->isThumb2Function()) {
723     if (getSubtarget().restrictIT())
724       return isV8EligibleForIT(&MI);
725   }
726 
727   return true;
728 }
729 
730 namespace llvm {
731 
732 template <> bool IsCPSRDead<MachineInstr>(const MachineInstr *MI) {
733   for (const MachineOperand &MO : MI->operands()) {
734     if (!MO.isReg() || MO.isUndef() || MO.isUse())
735       continue;
736     if (MO.getReg() != ARM::CPSR)
737       continue;
738     if (!MO.isDead())
739       return false;
740   }
741   // all definitions of CPSR are dead
742   return true;
743 }
744 
745 } // end namespace llvm
746 
747 /// GetInstSize - Return the size of the specified MachineInstr.
748 ///
749 unsigned ARMBaseInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
750   const MachineBasicBlock &MBB = *MI.getParent();
751   const MachineFunction *MF = MBB.getParent();
752   const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
753 
754   const MCInstrDesc &MCID = MI.getDesc();
755   if (MCID.getSize())
756     return MCID.getSize();
757 
758   switch (MI.getOpcode()) {
759   default:
760     // pseudo-instruction sizes are zero.
761     return 0;
762   case TargetOpcode::BUNDLE:
763     return getInstBundleLength(MI);
764   case ARM::MOVi16_ga_pcrel:
765   case ARM::MOVTi16_ga_pcrel:
766   case ARM::t2MOVi16_ga_pcrel:
767   case ARM::t2MOVTi16_ga_pcrel:
768     return 4;
769   case ARM::MOVi32imm:
770   case ARM::t2MOVi32imm:
771     return 8;
772   case ARM::CONSTPOOL_ENTRY:
773   case ARM::JUMPTABLE_INSTS:
774   case ARM::JUMPTABLE_ADDRS:
775   case ARM::JUMPTABLE_TBB:
776   case ARM::JUMPTABLE_TBH:
777     // If this machine instr is a constant pool entry, its size is recorded as
778     // operand #2.
779     return MI.getOperand(2).getImm();
780   case ARM::Int_eh_sjlj_longjmp:
781     return 16;
782   case ARM::tInt_eh_sjlj_longjmp:
783     return 10;
784   case ARM::tInt_WIN_eh_sjlj_longjmp:
785     return 12;
786   case ARM::Int_eh_sjlj_setjmp:
787   case ARM::Int_eh_sjlj_setjmp_nofp:
788     return 20;
789   case ARM::tInt_eh_sjlj_setjmp:
790   case ARM::t2Int_eh_sjlj_setjmp:
791   case ARM::t2Int_eh_sjlj_setjmp_nofp:
792     return 12;
793   case ARM::SPACE:
794     return MI.getOperand(1).getImm();
795   case ARM::INLINEASM:
796   case ARM::INLINEASM_BR: {
797     // If this machine instr is an inline asm, measure it.
798     unsigned Size = getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
799     if (!MF->getInfo<ARMFunctionInfo>()->isThumbFunction())
800       Size = alignTo(Size, 4);
801     return Size;
802   }
803   case ARM::SpeculationBarrierISBDSBEndBB:
804   case ARM::t2SpeculationBarrierISBDSBEndBB:
805     // This gets lowered to 2 4-byte instructions.
806     return 8;
807   case ARM::SpeculationBarrierSBEndBB:
808   case ARM::t2SpeculationBarrierSBEndBB:
809     // This gets lowered to 1 4-byte instructions.
810     return 4;
811   }
812 }
813 
814 unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
815   unsigned Size = 0;
816   MachineBasicBlock::const_instr_iterator I = MI.getIterator();
817   MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
818   while (++I != E && I->isInsideBundle()) {
819     assert(!I->isBundle() && "No nested bundle!");
820     Size += getInstSizeInBytes(*I);
821   }
822   return Size;
823 }
824 
825 void ARMBaseInstrInfo::copyFromCPSR(MachineBasicBlock &MBB,
826                                     MachineBasicBlock::iterator I,
827                                     unsigned DestReg, bool KillSrc,
828                                     const ARMSubtarget &Subtarget) const {
829   unsigned Opc = Subtarget.isThumb()
830                      ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
831                      : ARM::MRS;
832 
833   MachineInstrBuilder MIB =
834       BuildMI(MBB, I, I->getDebugLoc(), get(Opc), DestReg);
835 
836   // There is only 1 A/R class MRS instruction, and it always refers to
837   // APSR. However, there are lots of other possibilities on M-class cores.
838   if (Subtarget.isMClass())
839     MIB.addImm(0x800);
840 
841   MIB.add(predOps(ARMCC::AL))
842      .addReg(ARM::CPSR, RegState::Implicit | getKillRegState(KillSrc));
843 }
844 
845 void ARMBaseInstrInfo::copyToCPSR(MachineBasicBlock &MBB,
846                                   MachineBasicBlock::iterator I,
847                                   unsigned SrcReg, bool KillSrc,
848                                   const ARMSubtarget &Subtarget) const {
849   unsigned Opc = Subtarget.isThumb()
850                      ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
851                      : ARM::MSR;
852 
853   MachineInstrBuilder MIB = BuildMI(MBB, I, I->getDebugLoc(), get(Opc));
854 
855   if (Subtarget.isMClass())
856     MIB.addImm(0x800);
857   else
858     MIB.addImm(8);
859 
860   MIB.addReg(SrcReg, getKillRegState(KillSrc))
861      .add(predOps(ARMCC::AL))
862      .addReg(ARM::CPSR, RegState::Implicit | RegState::Define);
863 }
864 
865 void llvm::addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB) {
866   MIB.addImm(ARMVCC::None);
867   MIB.addReg(0);
868   MIB.addReg(0); // tp_reg
869 }
870 
871 void llvm::addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB,
872                                       Register DestReg) {
873   addUnpredicatedMveVpredNOp(MIB);
874   MIB.addReg(DestReg, RegState::Undef);
875 }
876 
877 void llvm::addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond) {
878   MIB.addImm(Cond);
879   MIB.addReg(ARM::VPR, RegState::Implicit);
880   MIB.addReg(0); // tp_reg
881 }
882 
883 void llvm::addPredicatedMveVpredROp(MachineInstrBuilder &MIB,
884                                     unsigned Cond, unsigned Inactive) {
885   addPredicatedMveVpredNOp(MIB, Cond);
886   MIB.addReg(Inactive);
887 }
888 
889 void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
890                                    MachineBasicBlock::iterator I,
891                                    const DebugLoc &DL, MCRegister DestReg,
892                                    MCRegister SrcReg, bool KillSrc) const {
893   bool GPRDest = ARM::GPRRegClass.contains(DestReg);
894   bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
895 
896   if (GPRDest && GPRSrc) {
897     BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
898         .addReg(SrcReg, getKillRegState(KillSrc))
899         .add(predOps(ARMCC::AL))
900         .add(condCodeOp());
901     return;
902   }
903 
904   bool SPRDest = ARM::SPRRegClass.contains(DestReg);
905   bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
906 
907   unsigned Opc = 0;
908   if (SPRDest && SPRSrc)
909     Opc = ARM::VMOVS;
910   else if (GPRDest && SPRSrc)
911     Opc = ARM::VMOVRS;
912   else if (SPRDest && GPRSrc)
913     Opc = ARM::VMOVSR;
914   else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && Subtarget.hasFP64())
915     Opc = ARM::VMOVD;
916   else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
917     Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;
918 
919   if (Opc) {
920     MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
921     MIB.addReg(SrcReg, getKillRegState(KillSrc));
922     if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR)
923       MIB.addReg(SrcReg, getKillRegState(KillSrc));
924     if (Opc == ARM::MVE_VORR)
925       addUnpredicatedMveVpredROp(MIB, DestReg);
926     else if (Opc != ARM::MQPRCopy)
927       MIB.add(predOps(ARMCC::AL));
928     return;
929   }
930 
931   // Handle register classes that require multiple instructions.
932   unsigned BeginIdx = 0;
933   unsigned SubRegs = 0;
934   int Spacing = 1;
935 
936   // Use VORRq when possible.
937   if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) {
938     Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
939     BeginIdx = ARM::qsub_0;
940     SubRegs = 2;
941   } else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
942     Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
943     BeginIdx = ARM::qsub_0;
944     SubRegs = 4;
945   // Fall back to VMOVD.
946   } else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) {
947     Opc = ARM::VMOVD;
948     BeginIdx = ARM::dsub_0;
949     SubRegs = 2;
950   } else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) {
951     Opc = ARM::VMOVD;
952     BeginIdx = ARM::dsub_0;
953     SubRegs = 3;
954   } else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) {
955     Opc = ARM::VMOVD;
956     BeginIdx = ARM::dsub_0;
957     SubRegs = 4;
958   } else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) {
959     Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
960     BeginIdx = ARM::gsub_0;
961     SubRegs = 2;
962   } else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) {
963     Opc = ARM::VMOVD;
964     BeginIdx = ARM::dsub_0;
965     SubRegs = 2;
966     Spacing = 2;
967   } else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) {
968     Opc = ARM::VMOVD;
969     BeginIdx = ARM::dsub_0;
970     SubRegs = 3;
971     Spacing = 2;
972   } else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) {
973     Opc = ARM::VMOVD;
974     BeginIdx = ARM::dsub_0;
975     SubRegs = 4;
976     Spacing = 2;
977   } else if (ARM::DPRRegClass.contains(DestReg, SrcReg) &&
978              !Subtarget.hasFP64()) {
979     Opc = ARM::VMOVS;
980     BeginIdx = ARM::ssub_0;
981     SubRegs = 2;
982   } else if (SrcReg == ARM::CPSR) {
983     copyFromCPSR(MBB, I, DestReg, KillSrc, Subtarget);
984     return;
985   } else if (DestReg == ARM::CPSR) {
986     copyToCPSR(MBB, I, SrcReg, KillSrc, Subtarget);
987     return;
988   } else if (DestReg == ARM::VPR) {
989     assert(ARM::GPRRegClass.contains(SrcReg));
990     BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_P0), DestReg)
991         .addReg(SrcReg, getKillRegState(KillSrc))
992         .add(predOps(ARMCC::AL));
993     return;
994   } else if (SrcReg == ARM::VPR) {
995     assert(ARM::GPRRegClass.contains(DestReg));
996     BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_P0), DestReg)
997         .addReg(SrcReg, getKillRegState(KillSrc))
998         .add(predOps(ARMCC::AL));
999     return;
1000   } else if (DestReg == ARM::FPSCR_NZCV) {
1001     assert(ARM::GPRRegClass.contains(SrcReg));
1002     BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
1003         .addReg(SrcReg, getKillRegState(KillSrc))
1004         .add(predOps(ARMCC::AL));
1005     return;
1006   } else if (SrcReg == ARM::FPSCR_NZCV) {
1007     assert(ARM::GPRRegClass.contains(DestReg));
1008     BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
1009         .addReg(SrcReg, getKillRegState(KillSrc))
1010         .add(predOps(ARMCC::AL));
1011     return;
1012   }
1013 
1014   assert(Opc && "Impossible reg-to-reg copy");
1015 
1016   const TargetRegisterInfo *TRI = &getRegisterInfo();
1017   MachineInstrBuilder Mov;
1018 
1019   // Copy register tuples backward when the first Dest reg overlaps with SrcReg.
1020   if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
1021     BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
1022     Spacing = -Spacing;
1023   }
1024 #ifndef NDEBUG
1025   SmallSet<unsigned, 4> DstRegs;
1026 #endif
1027   for (unsigned i = 0; i != SubRegs; ++i) {
1028     Register Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
1029     Register Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
1030     assert(Dst && Src && "Bad sub-register");
1031 #ifndef NDEBUG
1032     assert(!DstRegs.count(Src) && "destructive vector copy");
1033     DstRegs.insert(Dst);
1034 #endif
1035     Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst).addReg(Src);
1036     // VORR (NEON or MVE) takes two source operands.
1037     if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) {
1038       Mov.addReg(Src);
1039     }
1040     // MVE VORR takes predicate operands in place of an ordinary condition.
1041     if (Opc == ARM::MVE_VORR)
1042       addUnpredicatedMveVpredROp(Mov, Dst);
1043     else
1044       Mov = Mov.add(predOps(ARMCC::AL));
1045     // MOVr can set CC.
1046     if (Opc == ARM::MOVr)
1047       Mov = Mov.add(condCodeOp());
1048   }
1049   // Add implicit super-register defs and kills to the last instruction.
1050   Mov->addRegisterDefined(DestReg, TRI);
1051   if (KillSrc)
1052     Mov->addRegisterKilled(SrcReg, TRI);
1053 }
1054 
1055 Optional<DestSourcePair>
1056 ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
1057   // VMOVRRD is also a copy instruction but it requires
1058   // special way of handling. It is more complex copy version
1059   // and since that we are not considering it. For recognition
1060   // of such instruction isExtractSubregLike MI interface fuction
1061   // could be used.
1062   // VORRq is considered as a move only if two inputs are
1063   // the same register.
1064   if (!MI.isMoveReg() ||
1065       (MI.getOpcode() == ARM::VORRq &&
1066        MI.getOperand(1).getReg() != MI.getOperand(2).getReg()))
1067     return None;
1068   return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1069 }
1070 
1071 Optional<ParamLoadedValue>
1072 ARMBaseInstrInfo::describeLoadedValue(const MachineInstr &MI,
1073                                       Register Reg) const {
1074   if (auto DstSrcPair = isCopyInstrImpl(MI)) {
1075     Register DstReg = DstSrcPair->Destination->getReg();
1076 
1077     // TODO: We don't handle cases where the forwarding reg is narrower/wider
1078     // than the copy registers. Consider for example:
1079     //
1080     //   s16 = VMOVS s0
1081     //   s17 = VMOVS s1
1082     //   call @callee(d0)
1083     //
1084     // We'd like to describe the call site value of d0 as d8, but this requires
1085     // gathering and merging the descriptions for the two VMOVS instructions.
1086     //
1087     // We also don't handle the reverse situation, where the forwarding reg is
1088     // narrower than the copy destination:
1089     //
1090     //   d8 = VMOVD d0
1091     //   call @callee(s1)
1092     //
1093     // We need to produce a fragment description (the call site value of s1 is
1094     // /not/ just d8).
1095     if (DstReg != Reg)
1096       return None;
1097   }
1098   return TargetInstrInfo::describeLoadedValue(MI, Reg);
1099 }
1100 
1101 const MachineInstrBuilder &
1102 ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
1103                           unsigned SubIdx, unsigned State,
1104                           const TargetRegisterInfo *TRI) const {
1105   if (!SubIdx)
1106     return MIB.addReg(Reg, State);
1107 
1108   if (Register::isPhysicalRegister(Reg))
1109     return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
1110   return MIB.addReg(Reg, State, SubIdx);
1111 }
1112 
1113 void ARMBaseInstrInfo::
1114 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
1115                     Register SrcReg, bool isKill, int FI,
1116                     const TargetRegisterClass *RC,
1117                     const TargetRegisterInfo *TRI) const {
1118   MachineFunction &MF = *MBB.getParent();
1119   MachineFrameInfo &MFI = MF.getFrameInfo();
1120   Align Alignment = MFI.getObjectAlign(FI);
1121 
1122   MachineMemOperand *MMO = MF.getMachineMemOperand(
1123       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
1124       MFI.getObjectSize(FI), Alignment);
1125 
1126   switch (TRI->getSpillSize(*RC)) {
1127     case 2:
1128       if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1129         BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRH))
1130             .addReg(SrcReg, getKillRegState(isKill))
1131             .addFrameIndex(FI)
1132             .addImm(0)
1133             .addMemOperand(MMO)
1134             .add(predOps(ARMCC::AL));
1135       } else
1136         llvm_unreachable("Unknown reg class!");
1137       break;
1138     case 4:
1139       if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1140         BuildMI(MBB, I, DebugLoc(), get(ARM::STRi12))
1141             .addReg(SrcReg, getKillRegState(isKill))
1142             .addFrameIndex(FI)
1143             .addImm(0)
1144             .addMemOperand(MMO)
1145             .add(predOps(ARMCC::AL));
1146       } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1147         BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRS))
1148             .addReg(SrcReg, getKillRegState(isKill))
1149             .addFrameIndex(FI)
1150             .addImm(0)
1151             .addMemOperand(MMO)
1152             .add(predOps(ARMCC::AL));
1153       } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1154         BuildMI(MBB, I, DebugLoc(), get(ARM::VSTR_P0_off))
1155             .addReg(SrcReg, getKillRegState(isKill))
1156             .addFrameIndex(FI)
1157             .addImm(0)
1158             .addMemOperand(MMO)
1159             .add(predOps(ARMCC::AL));
1160       } else
1161         llvm_unreachable("Unknown reg class!");
1162       break;
1163     case 8:
1164       if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1165         BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRD))
1166             .addReg(SrcReg, getKillRegState(isKill))
1167             .addFrameIndex(FI)
1168             .addImm(0)
1169             .addMemOperand(MMO)
1170             .add(predOps(ARMCC::AL));
1171       } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1172         if (Subtarget.hasV5TEOps()) {
1173           MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STRD));
1174           AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
1175           AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
1176           MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
1177              .add(predOps(ARMCC::AL));
1178         } else {
1179           // Fallback to STM instruction, which has existed since the dawn of
1180           // time.
1181           MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STMIA))
1182                                         .addFrameIndex(FI)
1183                                         .addMemOperand(MMO)
1184                                         .add(predOps(ARMCC::AL));
1185           AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
1186           AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
1187         }
1188       } else
1189         llvm_unreachable("Unknown reg class!");
1190       break;
1191     case 16:
1192       if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1193         // Use aligned spills if the stack can be realigned.
1194         if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
1195           BuildMI(MBB, I, DebugLoc(), get(ARM::VST1q64))
1196               .addFrameIndex(FI)
1197               .addImm(16)
1198               .addReg(SrcReg, getKillRegState(isKill))
1199               .addMemOperand(MMO)
1200               .add(predOps(ARMCC::AL));
1201         } else {
1202           BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMQIA))
1203               .addReg(SrcReg, getKillRegState(isKill))
1204               .addFrameIndex(FI)
1205               .addMemOperand(MMO)
1206               .add(predOps(ARMCC::AL));
1207         }
1208       } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1209                  Subtarget.hasMVEIntegerOps()) {
1210         auto MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::MVE_VSTRWU32));
1211         MIB.addReg(SrcReg, getKillRegState(isKill))
1212           .addFrameIndex(FI)
1213           .addImm(0)
1214           .addMemOperand(MMO);
1215         addUnpredicatedMveVpredNOp(MIB);
1216       } else
1217         llvm_unreachable("Unknown reg class!");
1218       break;
1219     case 24:
1220       if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1221         // Use aligned spills if the stack can be realigned.
1222         if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
1223             Subtarget.hasNEON()) {
1224           BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64TPseudo))
1225               .addFrameIndex(FI)
1226               .addImm(16)
1227               .addReg(SrcReg, getKillRegState(isKill))
1228               .addMemOperand(MMO)
1229               .add(predOps(ARMCC::AL));
1230         } else {
1231           MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
1232                                             get(ARM::VSTMDIA))
1233                                         .addFrameIndex(FI)
1234                                         .add(predOps(ARMCC::AL))
1235                                         .addMemOperand(MMO);
1236           MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
1237           MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
1238           AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
1239         }
1240       } else
1241         llvm_unreachable("Unknown reg class!");
1242       break;
1243     case 32:
1244       if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1245           ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1246           ARM::DQuadRegClass.hasSubClassEq(RC)) {
1247         if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
1248             Subtarget.hasNEON()) {
1249           // FIXME: It's possible to only store part of the QQ register if the
1250           // spilled def has a sub-register index.
1251           BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64QPseudo))
1252               .addFrameIndex(FI)
1253               .addImm(16)
1254               .addReg(SrcReg, getKillRegState(isKill))
1255               .addMemOperand(MMO)
1256               .add(predOps(ARMCC::AL));
1257         } else if (Subtarget.hasMVEIntegerOps()) {
1258           BuildMI(MBB, I, DebugLoc(), get(ARM::MQQPRStore))
1259               .addReg(SrcReg, getKillRegState(isKill))
1260               .addFrameIndex(FI)
1261               .addMemOperand(MMO);
1262         } else {
1263           MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
1264                                             get(ARM::VSTMDIA))
1265                                         .addFrameIndex(FI)
1266                                         .add(predOps(ARMCC::AL))
1267                                         .addMemOperand(MMO);
1268           MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
1269           MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
1270           MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
1271                 AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
1272         }
1273       } else
1274         llvm_unreachable("Unknown reg class!");
1275       break;
1276     case 64:
1277       if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1278           Subtarget.hasMVEIntegerOps()) {
1279         BuildMI(MBB, I, DebugLoc(), get(ARM::MQQQQPRStore))
1280             .addReg(SrcReg, getKillRegState(isKill))
1281             .addFrameIndex(FI)
1282             .addMemOperand(MMO);
1283       } else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1284         MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMDIA))
1285                                       .addFrameIndex(FI)
1286                                       .add(predOps(ARMCC::AL))
1287                                       .addMemOperand(MMO);
1288         MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
1289         MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
1290         MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
1291         MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
1292         MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
1293         MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
1294         MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
1295               AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
1296       } else
1297         llvm_unreachable("Unknown reg class!");
1298       break;
1299     default:
1300       llvm_unreachable("Unknown reg class!");
1301   }
1302 }
1303 
1304 unsigned ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
1305                                               int &FrameIndex) const {
1306   switch (MI.getOpcode()) {
1307   default: break;
1308   case ARM::STRrs:
1309   case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
1310     if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
1311         MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
1312         MI.getOperand(3).getImm() == 0) {
1313       FrameIndex = MI.getOperand(1).getIndex();
1314       return MI.getOperand(0).getReg();
1315     }
1316     break;
1317   case ARM::STRi12:
1318   case ARM::t2STRi12:
1319   case ARM::tSTRspi:
1320   case ARM::VSTRD:
1321   case ARM::VSTRS:
1322   case ARM::VSTR_P0_off:
1323   case ARM::MVE_VSTRWU32:
1324     if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
1325         MI.getOperand(2).getImm() == 0) {
1326       FrameIndex = MI.getOperand(1).getIndex();
1327       return MI.getOperand(0).getReg();
1328     }
1329     break;
1330   case ARM::VST1q64:
1331   case ARM::VST1d64TPseudo:
1332   case ARM::VST1d64QPseudo:
1333     if (MI.getOperand(0).isFI() && MI.getOperand(2).getSubReg() == 0) {
1334       FrameIndex = MI.getOperand(0).getIndex();
1335       return MI.getOperand(2).getReg();
1336     }
1337     break;
1338   case ARM::VSTMQIA:
1339     if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
1340       FrameIndex = MI.getOperand(1).getIndex();
1341       return MI.getOperand(0).getReg();
1342     }
1343     break;
1344   case ARM::MQQPRStore:
1345   case ARM::MQQQQPRStore:
1346     if (MI.getOperand(1).isFI()) {
1347       FrameIndex = MI.getOperand(1).getIndex();
1348       return MI.getOperand(0).getReg();
1349     }
1350     break;
1351   }
1352 
1353   return 0;
1354 }
1355 
1356 unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
1357                                                     int &FrameIndex) const {
1358   SmallVector<const MachineMemOperand *, 1> Accesses;
1359   if (MI.mayStore() && hasStoreToStackSlot(MI, Accesses) &&
1360       Accesses.size() == 1) {
1361     FrameIndex =
1362         cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
1363             ->getFrameIndex();
1364     return true;
1365   }
1366   return false;
1367 }
1368 
1369 void ARMBaseInstrInfo::
1370 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
1371                      Register DestReg, int FI,
1372                      const TargetRegisterClass *RC,
1373                      const TargetRegisterInfo *TRI) const {
1374   DebugLoc DL;
1375   if (I != MBB.end()) DL = I->getDebugLoc();
1376   MachineFunction &MF = *MBB.getParent();
1377   MachineFrameInfo &MFI = MF.getFrameInfo();
1378   const Align Alignment = MFI.getObjectAlign(FI);
1379   MachineMemOperand *MMO = MF.getMachineMemOperand(
1380       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
1381       MFI.getObjectSize(FI), Alignment);
1382 
1383   switch (TRI->getSpillSize(*RC)) {
1384   case 2:
1385     if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1386       BuildMI(MBB, I, DL, get(ARM::VLDRH), DestReg)
1387           .addFrameIndex(FI)
1388           .addImm(0)
1389           .addMemOperand(MMO)
1390           .add(predOps(ARMCC::AL));
1391     } else
1392       llvm_unreachable("Unknown reg class!");
1393     break;
1394   case 4:
1395     if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1396       BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
1397           .addFrameIndex(FI)
1398           .addImm(0)
1399           .addMemOperand(MMO)
1400           .add(predOps(ARMCC::AL));
1401     } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1402       BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
1403           .addFrameIndex(FI)
1404           .addImm(0)
1405           .addMemOperand(MMO)
1406           .add(predOps(ARMCC::AL));
1407     } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1408       BuildMI(MBB, I, DL, get(ARM::VLDR_P0_off), DestReg)
1409           .addFrameIndex(FI)
1410           .addImm(0)
1411           .addMemOperand(MMO)
1412           .add(predOps(ARMCC::AL));
1413     } else
1414       llvm_unreachable("Unknown reg class!");
1415     break;
1416   case 8:
1417     if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1418       BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
1419           .addFrameIndex(FI)
1420           .addImm(0)
1421           .addMemOperand(MMO)
1422           .add(predOps(ARMCC::AL));
1423     } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1424       MachineInstrBuilder MIB;
1425 
1426       if (Subtarget.hasV5TEOps()) {
1427         MIB = BuildMI(MBB, I, DL, get(ARM::LDRD));
1428         AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
1429         AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
1430         MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
1431            .add(predOps(ARMCC::AL));
1432       } else {
1433         // Fallback to LDM instruction, which has existed since the dawn of
1434         // time.
1435         MIB = BuildMI(MBB, I, DL, get(ARM::LDMIA))
1436                   .addFrameIndex(FI)
1437                   .addMemOperand(MMO)
1438                   .add(predOps(ARMCC::AL));
1439         MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
1440         MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
1441       }
1442 
1443       if (Register::isPhysicalRegister(DestReg))
1444         MIB.addReg(DestReg, RegState::ImplicitDefine);
1445     } else
1446       llvm_unreachable("Unknown reg class!");
1447     break;
1448   case 16:
1449     if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1450       if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
1451         BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
1452             .addFrameIndex(FI)
1453             .addImm(16)
1454             .addMemOperand(MMO)
1455             .add(predOps(ARMCC::AL));
1456       } else {
1457         BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
1458             .addFrameIndex(FI)
1459             .addMemOperand(MMO)
1460             .add(predOps(ARMCC::AL));
1461       }
1462     } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1463                Subtarget.hasMVEIntegerOps()) {
1464       auto MIB = BuildMI(MBB, I, DL, get(ARM::MVE_VLDRWU32), DestReg);
1465       MIB.addFrameIndex(FI)
1466         .addImm(0)
1467         .addMemOperand(MMO);
1468       addUnpredicatedMveVpredNOp(MIB);
1469     } else
1470       llvm_unreachable("Unknown reg class!");
1471     break;
1472   case 24:
1473     if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1474       if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
1475           Subtarget.hasNEON()) {
1476         BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
1477             .addFrameIndex(FI)
1478             .addImm(16)
1479             .addMemOperand(MMO)
1480             .add(predOps(ARMCC::AL));
1481       } else {
1482         MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
1483                                       .addFrameIndex(FI)
1484                                       .addMemOperand(MMO)
1485                                       .add(predOps(ARMCC::AL));
1486         MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
1487         MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
1488         MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
1489         if (Register::isPhysicalRegister(DestReg))
1490           MIB.addReg(DestReg, RegState::ImplicitDefine);
1491       }
1492     } else
1493       llvm_unreachable("Unknown reg class!");
1494     break;
1495    case 32:
1496      if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1497          ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1498          ARM::DQuadRegClass.hasSubClassEq(RC)) {
1499        if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
1500            Subtarget.hasNEON()) {
1501          BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
1502              .addFrameIndex(FI)
1503              .addImm(16)
1504              .addMemOperand(MMO)
1505              .add(predOps(ARMCC::AL));
1506        } else if (Subtarget.hasMVEIntegerOps()) {
1507          BuildMI(MBB, I, DL, get(ARM::MQQPRLoad), DestReg)
1508              .addFrameIndex(FI)
1509              .addMemOperand(MMO);
1510        } else {
1511          MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
1512                                        .addFrameIndex(FI)
1513                                        .add(predOps(ARMCC::AL))
1514                                        .addMemOperand(MMO);
1515          MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
1516          MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
1517          MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
1518          MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
1519          if (Register::isPhysicalRegister(DestReg))
1520            MIB.addReg(DestReg, RegState::ImplicitDefine);
1521        }
1522      } else
1523        llvm_unreachable("Unknown reg class!");
1524      break;
1525   case 64:
1526     if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1527         Subtarget.hasMVEIntegerOps()) {
1528       BuildMI(MBB, I, DL, get(ARM::MQQQQPRLoad), DestReg)
1529           .addFrameIndex(FI)
1530           .addMemOperand(MMO);
1531     } else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1532       MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
1533                                     .addFrameIndex(FI)
1534                                     .add(predOps(ARMCC::AL))
1535                                     .addMemOperand(MMO);
1536       MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
1537       MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
1538       MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
1539       MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
1540       MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
1541       MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
1542       MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
1543       MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
1544       if (Register::isPhysicalRegister(DestReg))
1545         MIB.addReg(DestReg, RegState::ImplicitDefine);
1546     } else
1547       llvm_unreachable("Unknown reg class!");
1548     break;
1549   default:
1550     llvm_unreachable("Unknown regclass!");
1551   }
1552 }
1553 
1554 unsigned ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
1555                                                int &FrameIndex) const {
1556   switch (MI.getOpcode()) {
1557   default: break;
1558   case ARM::LDRrs:
1559   case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
1560     if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
1561         MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
1562         MI.getOperand(3).getImm() == 0) {
1563       FrameIndex = MI.getOperand(1).getIndex();
1564       return MI.getOperand(0).getReg();
1565     }
1566     break;
1567   case ARM::LDRi12:
1568   case ARM::t2LDRi12:
1569   case ARM::tLDRspi:
1570   case ARM::VLDRD:
1571   case ARM::VLDRS:
1572   case ARM::VLDR_P0_off:
1573   case ARM::MVE_VLDRWU32:
1574     if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
1575         MI.getOperand(2).getImm() == 0) {
1576       FrameIndex = MI.getOperand(1).getIndex();
1577       return MI.getOperand(0).getReg();
1578     }
1579     break;
1580   case ARM::VLD1q64:
1581   case ARM::VLD1d8TPseudo:
1582   case ARM::VLD1d16TPseudo:
1583   case ARM::VLD1d32TPseudo:
1584   case ARM::VLD1d64TPseudo:
1585   case ARM::VLD1d8QPseudo:
1586   case ARM::VLD1d16QPseudo:
1587   case ARM::VLD1d32QPseudo:
1588   case ARM::VLD1d64QPseudo:
1589     if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
1590       FrameIndex = MI.getOperand(1).getIndex();
1591       return MI.getOperand(0).getReg();
1592     }
1593     break;
1594   case ARM::VLDMQIA:
1595     if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
1596       FrameIndex = MI.getOperand(1).getIndex();
1597       return MI.getOperand(0).getReg();
1598     }
1599     break;
1600   case ARM::MQQPRLoad:
1601   case ARM::MQQQQPRLoad:
1602     if (MI.getOperand(1).isFI()) {
1603       FrameIndex = MI.getOperand(1).getIndex();
1604       return MI.getOperand(0).getReg();
1605     }
1606     break;
1607   }
1608 
1609   return 0;
1610 }
1611 
1612 unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
1613                                                      int &FrameIndex) const {
1614   SmallVector<const MachineMemOperand *, 1> Accesses;
1615   if (MI.mayLoad() && hasLoadFromStackSlot(MI, Accesses) &&
1616       Accesses.size() == 1) {
1617     FrameIndex =
1618         cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
1619             ->getFrameIndex();
1620     return true;
1621   }
1622   return false;
1623 }
1624 
1625 /// Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMID_UPD
1626 /// depending on whether the result is used.
1627 void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
1628   bool isThumb1 = Subtarget.isThumb1Only();
1629   bool isThumb2 = Subtarget.isThumb2();
1630   const ARMBaseInstrInfo *TII = Subtarget.getInstrInfo();
1631 
1632   DebugLoc dl = MI->getDebugLoc();
1633   MachineBasicBlock *BB = MI->getParent();
1634 
1635   MachineInstrBuilder LDM, STM;
1636   if (isThumb1 || !MI->getOperand(1).isDead()) {
1637     MachineOperand LDWb(MI->getOperand(1));
1638     LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1639                                                  : isThumb1 ? ARM::tLDMIA_UPD
1640                                                             : ARM::LDMIA_UPD))
1641               .add(LDWb);
1642   } else {
1643     LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1644   }
1645 
1646   if (isThumb1 || !MI->getOperand(0).isDead()) {
1647     MachineOperand STWb(MI->getOperand(0));
1648     STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA_UPD
1649                                                  : isThumb1 ? ARM::tSTMIA_UPD
1650                                                             : ARM::STMIA_UPD))
1651               .add(STWb);
1652   } else {
1653     STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1654   }
1655 
1656   MachineOperand LDBase(MI->getOperand(3));
1657   LDM.add(LDBase).add(predOps(ARMCC::AL));
1658 
1659   MachineOperand STBase(MI->getOperand(2));
1660   STM.add(STBase).add(predOps(ARMCC::AL));
1661 
1662   // Sort the scratch registers into ascending order.
1663   const TargetRegisterInfo &TRI = getRegisterInfo();
1664   SmallVector<unsigned, 6> ScratchRegs;
1665   for(unsigned I = 5; I < MI->getNumOperands(); ++I)
1666     ScratchRegs.push_back(MI->getOperand(I).getReg());
1667   llvm::sort(ScratchRegs,
1668              [&TRI](const unsigned &Reg1, const unsigned &Reg2) -> bool {
1669                return TRI.getEncodingValue(Reg1) <
1670                       TRI.getEncodingValue(Reg2);
1671              });
1672 
1673   for (const auto &Reg : ScratchRegs) {
1674     LDM.addReg(Reg, RegState::Define);
1675     STM.addReg(Reg, RegState::Kill);
1676   }
1677 
1678   BB->erase(MI);
1679 }
1680 
1681 bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1682   if (MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1683     expandLoadStackGuard(MI);
1684     MI.getParent()->erase(MI);
1685     return true;
1686   }
1687 
1688   if (MI.getOpcode() == ARM::MEMCPY) {
1689     expandMEMCPY(MI);
1690     return true;
1691   }
1692 
1693   // This hook gets to expand COPY instructions before they become
1694   // copyPhysReg() calls.  Look for VMOVS instructions that can legally be
1695   // widened to VMOVD.  We prefer the VMOVD when possible because it may be
1696   // changed into a VORR that can go down the NEON pipeline.
1697   if (!MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1698     return false;
1699 
1700   // Look for a copy between even S-registers.  That is where we keep floats
1701   // when using NEON v2f32 instructions for f32 arithmetic.
1702   Register DstRegS = MI.getOperand(0).getReg();
1703   Register SrcRegS = MI.getOperand(1).getReg();
1704   if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
1705     return false;
1706 
1707   const TargetRegisterInfo *TRI = &getRegisterInfo();
1708   unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
1709                                               &ARM::DPRRegClass);
1710   unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
1711                                               &ARM::DPRRegClass);
1712   if (!DstRegD || !SrcRegD)
1713     return false;
1714 
1715   // We want to widen this into a DstRegD = VMOVD SrcRegD copy.  This is only
1716   // legal if the COPY already defines the full DstRegD, and it isn't a
1717   // sub-register insertion.
1718   if (!MI.definesRegister(DstRegD, TRI) || MI.readsRegister(DstRegD, TRI))
1719     return false;
1720 
1721   // A dead copy shouldn't show up here, but reject it just in case.
1722   if (MI.getOperand(0).isDead())
1723     return false;
1724 
1725   // All clear, widen the COPY.
1726   LLVM_DEBUG(dbgs() << "widening:    " << MI);
1727   MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
1728 
1729   // Get rid of the old implicit-def of DstRegD.  Leave it if it defines a Q-reg
1730   // or some other super-register.
1731   int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD);
1732   if (ImpDefIdx != -1)
1733     MI.RemoveOperand(ImpDefIdx);
1734 
1735   // Change the opcode and operands.
1736   MI.setDesc(get(ARM::VMOVD));
1737   MI.getOperand(0).setReg(DstRegD);
1738   MI.getOperand(1).setReg(SrcRegD);
1739   MIB.add(predOps(ARMCC::AL));
1740 
1741   // We are now reading SrcRegD instead of SrcRegS.  This may upset the
1742   // register scavenger and machine verifier, so we need to indicate that we
1743   // are reading an undefined value from SrcRegD, but a proper value from
1744   // SrcRegS.
1745   MI.getOperand(1).setIsUndef();
1746   MIB.addReg(SrcRegS, RegState::Implicit);
1747 
1748   // SrcRegD may actually contain an unrelated value in the ssub_1
1749   // sub-register.  Don't kill it.  Only kill the ssub_0 sub-register.
1750   if (MI.getOperand(1).isKill()) {
1751     MI.getOperand(1).setIsKill(false);
1752     MI.addRegisterKilled(SrcRegS, TRI, true);
1753   }
1754 
1755   LLVM_DEBUG(dbgs() << "replaced by: " << MI);
1756   return true;
1757 }
1758 
1759 /// Create a copy of a const pool value. Update CPI to the new index and return
1760 /// the label UID.
1761 static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
1762   MachineConstantPool *MCP = MF.getConstantPool();
1763   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1764 
1765   const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
1766   assert(MCPE.isMachineConstantPoolEntry() &&
1767          "Expecting a machine constantpool entry!");
1768   ARMConstantPoolValue *ACPV =
1769     static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);
1770 
1771   unsigned PCLabelId = AFI->createPICLabelUId();
1772   ARMConstantPoolValue *NewCPV = nullptr;
1773 
1774   // FIXME: The below assumes PIC relocation model and that the function
1775   // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
1776   // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
1777   // instructions, so that's probably OK, but is PIC always correct when
1778   // we get here?
1779   if (ACPV->isGlobalValue())
1780     NewCPV = ARMConstantPoolConstant::Create(
1781         cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId, ARMCP::CPValue,
1782         4, ACPV->getModifier(), ACPV->mustAddCurrentAddress());
1783   else if (ACPV->isExtSymbol())
1784     NewCPV = ARMConstantPoolSymbol::
1785       Create(MF.getFunction().getContext(),
1786              cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
1787   else if (ACPV->isBlockAddress())
1788     NewCPV = ARMConstantPoolConstant::
1789       Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
1790              ARMCP::CPBlockAddress, 4);
1791   else if (ACPV->isLSDA())
1792     NewCPV = ARMConstantPoolConstant::Create(&MF.getFunction(), PCLabelId,
1793                                              ARMCP::CPLSDA, 4);
1794   else if (ACPV->isMachineBasicBlock())
1795     NewCPV = ARMConstantPoolMBB::
1796       Create(MF.getFunction().getContext(),
1797              cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
1798   else
1799     llvm_unreachable("Unexpected ARM constantpool value type!!");
1800   CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlign());
1801   return PCLabelId;
1802 }
1803 
1804 void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB,
1805                                      MachineBasicBlock::iterator I,
1806                                      Register DestReg, unsigned SubIdx,
1807                                      const MachineInstr &Orig,
1808                                      const TargetRegisterInfo &TRI) const {
1809   unsigned Opcode = Orig.getOpcode();
1810   switch (Opcode) {
1811   default: {
1812     MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
1813     MI->substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
1814     MBB.insert(I, MI);
1815     break;
1816   }
1817   case ARM::tLDRpci_pic:
1818   case ARM::t2LDRpci_pic: {
1819     MachineFunction &MF = *MBB.getParent();
1820     unsigned CPI = Orig.getOperand(1).getIndex();
1821     unsigned PCLabelId = duplicateCPV(MF, CPI);
1822     BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg)
1823         .addConstantPoolIndex(CPI)
1824         .addImm(PCLabelId)
1825         .cloneMemRefs(Orig);
1826     break;
1827   }
1828   }
1829 }
1830 
1831 MachineInstr &
1832 ARMBaseInstrInfo::duplicate(MachineBasicBlock &MBB,
1833     MachineBasicBlock::iterator InsertBefore,
1834     const MachineInstr &Orig) const {
1835   MachineInstr &Cloned = TargetInstrInfo::duplicate(MBB, InsertBefore, Orig);
1836   MachineBasicBlock::instr_iterator I = Cloned.getIterator();
1837   for (;;) {
1838     switch (I->getOpcode()) {
1839     case ARM::tLDRpci_pic:
1840     case ARM::t2LDRpci_pic: {
1841       MachineFunction &MF = *MBB.getParent();
1842       unsigned CPI = I->getOperand(1).getIndex();
1843       unsigned PCLabelId = duplicateCPV(MF, CPI);
1844       I->getOperand(1).setIndex(CPI);
1845       I->getOperand(2).setImm(PCLabelId);
1846       break;
1847     }
1848     }
1849     if (!I->isBundledWithSucc())
1850       break;
1851     ++I;
1852   }
1853   return Cloned;
1854 }
1855 
1856 bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0,
1857                                         const MachineInstr &MI1,
1858                                         const MachineRegisterInfo *MRI) const {
1859   unsigned Opcode = MI0.getOpcode();
1860   if (Opcode == ARM::t2LDRpci ||
1861       Opcode == ARM::t2LDRpci_pic ||
1862       Opcode == ARM::tLDRpci ||
1863       Opcode == ARM::tLDRpci_pic ||
1864       Opcode == ARM::LDRLIT_ga_pcrel ||
1865       Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1866       Opcode == ARM::tLDRLIT_ga_pcrel ||
1867       Opcode == ARM::MOV_ga_pcrel ||
1868       Opcode == ARM::MOV_ga_pcrel_ldr ||
1869       Opcode == ARM::t2MOV_ga_pcrel) {
1870     if (MI1.getOpcode() != Opcode)
1871       return false;
1872     if (MI0.getNumOperands() != MI1.getNumOperands())
1873       return false;
1874 
1875     const MachineOperand &MO0 = MI0.getOperand(1);
1876     const MachineOperand &MO1 = MI1.getOperand(1);
1877     if (MO0.getOffset() != MO1.getOffset())
1878       return false;
1879 
1880     if (Opcode == ARM::LDRLIT_ga_pcrel ||
1881         Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1882         Opcode == ARM::tLDRLIT_ga_pcrel ||
1883         Opcode == ARM::MOV_ga_pcrel ||
1884         Opcode == ARM::MOV_ga_pcrel_ldr ||
1885         Opcode == ARM::t2MOV_ga_pcrel)
1886       // Ignore the PC labels.
1887       return MO0.getGlobal() == MO1.getGlobal();
1888 
1889     const MachineFunction *MF = MI0.getParent()->getParent();
1890     const MachineConstantPool *MCP = MF->getConstantPool();
1891     int CPI0 = MO0.getIndex();
1892     int CPI1 = MO1.getIndex();
1893     const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
1894     const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
1895     bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
1896     bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
1897     if (isARMCP0 && isARMCP1) {
1898       ARMConstantPoolValue *ACPV0 =
1899         static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
1900       ARMConstantPoolValue *ACPV1 =
1901         static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
1902       return ACPV0->hasSameValue(ACPV1);
1903     } else if (!isARMCP0 && !isARMCP1) {
1904       return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
1905     }
1906     return false;
1907   } else if (Opcode == ARM::PICLDR) {
1908     if (MI1.getOpcode() != Opcode)
1909       return false;
1910     if (MI0.getNumOperands() != MI1.getNumOperands())
1911       return false;
1912 
1913     Register Addr0 = MI0.getOperand(1).getReg();
1914     Register Addr1 = MI1.getOperand(1).getReg();
1915     if (Addr0 != Addr1) {
1916       if (!MRI || !Register::isVirtualRegister(Addr0) ||
1917           !Register::isVirtualRegister(Addr1))
1918         return false;
1919 
1920       // This assumes SSA form.
1921       MachineInstr *Def0 = MRI->getVRegDef(Addr0);
1922       MachineInstr *Def1 = MRI->getVRegDef(Addr1);
1923       // Check if the loaded value, e.g. a constantpool of a global address, are
1924       // the same.
1925       if (!produceSameValue(*Def0, *Def1, MRI))
1926         return false;
1927     }
1928 
1929     for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
1930       // %12 = PICLDR %11, 0, 14, %noreg
1931       const MachineOperand &MO0 = MI0.getOperand(i);
1932       const MachineOperand &MO1 = MI1.getOperand(i);
1933       if (!MO0.isIdenticalTo(MO1))
1934         return false;
1935     }
1936     return true;
1937   }
1938 
1939   return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
1940 }
1941 
1942 /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
1943 /// determine if two loads are loading from the same base address. It should
1944 /// only return true if the base pointers are the same and the only differences
1945 /// between the two addresses is the offset. It also returns the offsets by
1946 /// reference.
1947 ///
1948 /// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
1949 /// is permanently disabled.
1950 bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
1951                                                int64_t &Offset1,
1952                                                int64_t &Offset2) const {
1953   // Don't worry about Thumb: just ARM and Thumb2.
1954   if (Subtarget.isThumb1Only()) return false;
1955 
1956   if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
1957     return false;
1958 
1959   switch (Load1->getMachineOpcode()) {
1960   default:
1961     return false;
1962   case ARM::LDRi12:
1963   case ARM::LDRBi12:
1964   case ARM::LDRD:
1965   case ARM::LDRH:
1966   case ARM::LDRSB:
1967   case ARM::LDRSH:
1968   case ARM::VLDRD:
1969   case ARM::VLDRS:
1970   case ARM::t2LDRi8:
1971   case ARM::t2LDRBi8:
1972   case ARM::t2LDRDi8:
1973   case ARM::t2LDRSHi8:
1974   case ARM::t2LDRi12:
1975   case ARM::t2LDRBi12:
1976   case ARM::t2LDRSHi12:
1977     break;
1978   }
1979 
1980   switch (Load2->getMachineOpcode()) {
1981   default:
1982     return false;
1983   case ARM::LDRi12:
1984   case ARM::LDRBi12:
1985   case ARM::LDRD:
1986   case ARM::LDRH:
1987   case ARM::LDRSB:
1988   case ARM::LDRSH:
1989   case ARM::VLDRD:
1990   case ARM::VLDRS:
1991   case ARM::t2LDRi8:
1992   case ARM::t2LDRBi8:
1993   case ARM::t2LDRSHi8:
1994   case ARM::t2LDRi12:
1995   case ARM::t2LDRBi12:
1996   case ARM::t2LDRSHi12:
1997     break;
1998   }
1999 
2000   // Check if base addresses and chain operands match.
2001   if (Load1->getOperand(0) != Load2->getOperand(0) ||
2002       Load1->getOperand(4) != Load2->getOperand(4))
2003     return false;
2004 
2005   // Index should be Reg0.
2006   if (Load1->getOperand(3) != Load2->getOperand(3))
2007     return false;
2008 
2009   // Determine the offsets.
2010   if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
2011       isa<ConstantSDNode>(Load2->getOperand(1))) {
2012     Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
2013     Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
2014     return true;
2015   }
2016 
2017   return false;
2018 }
2019 
2020 /// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to
2021 /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
2022 /// be scheduled togther. On some targets if two loads are loading from
2023 /// addresses in the same cache line, it's better if they are scheduled
2024 /// together. This function takes two integers that represent the load offsets
2025 /// from the common base address. It returns true if it decides it's desirable
2026 /// to schedule the two loads together. "NumLoads" is the number of loads that
2027 /// have already been scheduled after Load1.
2028 ///
2029 /// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
2030 /// is permanently disabled.
2031 bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
2032                                                int64_t Offset1, int64_t Offset2,
2033                                                unsigned NumLoads) const {
2034   // Don't worry about Thumb: just ARM and Thumb2.
2035   if (Subtarget.isThumb1Only()) return false;
2036 
2037   assert(Offset2 > Offset1);
2038 
2039   if ((Offset2 - Offset1) / 8 > 64)
2040     return false;
2041 
2042   // Check if the machine opcodes are different. If they are different
2043   // then we consider them to not be of the same base address,
2044   // EXCEPT in the case of Thumb2 byte loads where one is LDRBi8 and the other LDRBi12.
2045   // In this case, they are considered to be the same because they are different
2046   // encoding forms of the same basic instruction.
2047   if ((Load1->getMachineOpcode() != Load2->getMachineOpcode()) &&
2048       !((Load1->getMachineOpcode() == ARM::t2LDRBi8 &&
2049          Load2->getMachineOpcode() == ARM::t2LDRBi12) ||
2050         (Load1->getMachineOpcode() == ARM::t2LDRBi12 &&
2051          Load2->getMachineOpcode() == ARM::t2LDRBi8)))
2052     return false;  // FIXME: overly conservative?
2053 
2054   // Four loads in a row should be sufficient.
2055   if (NumLoads >= 3)
2056     return false;
2057 
2058   return true;
2059 }
2060 
2061 bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
2062                                             const MachineBasicBlock *MBB,
2063                                             const MachineFunction &MF) const {
2064   // Debug info is never a scheduling boundary. It's necessary to be explicit
2065   // due to the special treatment of IT instructions below, otherwise a
2066   // dbg_value followed by an IT will result in the IT instruction being
2067   // considered a scheduling hazard, which is wrong. It should be the actual
2068   // instruction preceding the dbg_value instruction(s), just like it is
2069   // when debug info is not present.
2070   if (MI.isDebugInstr())
2071     return false;
2072 
2073   // Terminators and labels can't be scheduled around.
2074   if (MI.isTerminator() || MI.isPosition())
2075     return true;
2076 
2077   // INLINEASM_BR can jump to another block
2078   if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
2079     return true;
2080 
2081   // Treat the start of the IT block as a scheduling boundary, but schedule
2082   // t2IT along with all instructions following it.
2083   // FIXME: This is a big hammer. But the alternative is to add all potential
2084   // true and anti dependencies to IT block instructions as implicit operands
2085   // to the t2IT instruction. The added compile time and complexity does not
2086   // seem worth it.
2087   MachineBasicBlock::const_iterator I = MI;
2088   // Make sure to skip any debug instructions
2089   while (++I != MBB->end() && I->isDebugInstr())
2090     ;
2091   if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
2092     return true;
2093 
2094   // Don't attempt to schedule around any instruction that defines
2095   // a stack-oriented pointer, as it's unlikely to be profitable. This
2096   // saves compile time, because it doesn't require every single
2097   // stack slot reference to depend on the instruction that does the
2098   // modification.
2099   // Calls don't actually change the stack pointer, even if they have imp-defs.
2100   // No ARM calling conventions change the stack pointer. (X86 calling
2101   // conventions sometimes do).
2102   if (!MI.isCall() && MI.definesRegister(ARM::SP))
2103     return true;
2104 
2105   return false;
2106 }
2107 
2108 bool ARMBaseInstrInfo::
2109 isProfitableToIfCvt(MachineBasicBlock &MBB,
2110                     unsigned NumCycles, unsigned ExtraPredCycles,
2111                     BranchProbability Probability) const {
2112   if (!NumCycles)
2113     return false;
2114 
2115   // If we are optimizing for size, see if the branch in the predecessor can be
2116   // lowered to cbn?z by the constant island lowering pass, and return false if
2117   // so. This results in a shorter instruction sequence.
2118   if (MBB.getParent()->getFunction().hasOptSize()) {
2119     MachineBasicBlock *Pred = *MBB.pred_begin();
2120     if (!Pred->empty()) {
2121       MachineInstr *LastMI = &*Pred->rbegin();
2122       if (LastMI->getOpcode() == ARM::t2Bcc) {
2123         const TargetRegisterInfo *TRI = &getRegisterInfo();
2124         MachineInstr *CmpMI = findCMPToFoldIntoCBZ(LastMI, TRI);
2125         if (CmpMI)
2126           return false;
2127       }
2128     }
2129   }
2130   return isProfitableToIfCvt(MBB, NumCycles, ExtraPredCycles,
2131                              MBB, 0, 0, Probability);
2132 }
2133 
2134 bool ARMBaseInstrInfo::
2135 isProfitableToIfCvt(MachineBasicBlock &TBB,
2136                     unsigned TCycles, unsigned TExtra,
2137                     MachineBasicBlock &FBB,
2138                     unsigned FCycles, unsigned FExtra,
2139                     BranchProbability Probability) const {
2140   if (!TCycles)
2141     return false;
2142 
2143   // In thumb code we often end up trading one branch for a IT block, and
2144   // if we are cloning the instruction can increase code size. Prevent
2145   // blocks with multiple predecesors from being ifcvted to prevent this
2146   // cloning.
2147   if (Subtarget.isThumb2() && TBB.getParent()->getFunction().hasMinSize()) {
2148     if (TBB.pred_size() != 1 || FBB.pred_size() != 1)
2149       return false;
2150   }
2151 
2152   // Attempt to estimate the relative costs of predication versus branching.
2153   // Here we scale up each component of UnpredCost to avoid precision issue when
2154   // scaling TCycles/FCycles by Probability.
2155   const unsigned ScalingUpFactor = 1024;
2156 
2157   unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
2158   unsigned UnpredCost;
2159   if (!Subtarget.hasBranchPredictor()) {
2160     // When we don't have a branch predictor it's always cheaper to not take a
2161     // branch than take it, so we have to take that into account.
2162     unsigned NotTakenBranchCost = 1;
2163     unsigned TakenBranchCost = Subtarget.getMispredictionPenalty();
2164     unsigned TUnpredCycles, FUnpredCycles;
2165     if (!FCycles) {
2166       // Triangle: TBB is the fallthrough
2167       TUnpredCycles = TCycles + NotTakenBranchCost;
2168       FUnpredCycles = TakenBranchCost;
2169     } else {
2170       // Diamond: TBB is the block that is branched to, FBB is the fallthrough
2171       TUnpredCycles = TCycles + TakenBranchCost;
2172       FUnpredCycles = FCycles + NotTakenBranchCost;
2173       // The branch at the end of FBB will disappear when it's predicated, so
2174       // discount it from PredCost.
2175       PredCost -= 1 * ScalingUpFactor;
2176     }
2177     // The total cost is the cost of each path scaled by their probabilites
2178     unsigned TUnpredCost = Probability.scale(TUnpredCycles * ScalingUpFactor);
2179     unsigned FUnpredCost = Probability.getCompl().scale(FUnpredCycles * ScalingUpFactor);
2180     UnpredCost = TUnpredCost + FUnpredCost;
2181     // When predicating assume that the first IT can be folded away but later
2182     // ones cost one cycle each
2183     if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
2184       PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2185     }
2186   } else {
2187     unsigned TUnpredCost = Probability.scale(TCycles * ScalingUpFactor);
2188     unsigned FUnpredCost =
2189       Probability.getCompl().scale(FCycles * ScalingUpFactor);
2190     UnpredCost = TUnpredCost + FUnpredCost;
2191     UnpredCost += 1 * ScalingUpFactor; // The branch itself
2192     UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10;
2193   }
2194 
2195   return PredCost <= UnpredCost;
2196 }
2197 
2198 unsigned
2199 ARMBaseInstrInfo::extraSizeToPredicateInstructions(const MachineFunction &MF,
2200                                                    unsigned NumInsts) const {
2201   // Thumb2 needs a 2-byte IT instruction to predicate up to 4 instructions.
2202   // ARM has a condition code field in every predicable instruction, using it
2203   // doesn't change code size.
2204   if (!Subtarget.isThumb2())
2205     return 0;
2206 
2207   // It's possible that the size of the IT is restricted to a single block.
2208   unsigned MaxInsts = Subtarget.restrictIT() ? 1 : 4;
2209   return divideCeil(NumInsts, MaxInsts) * 2;
2210 }
2211 
2212 unsigned
2213 ARMBaseInstrInfo::predictBranchSizeForIfCvt(MachineInstr &MI) const {
2214   // If this branch is likely to be folded into the comparison to form a
2215   // CB(N)Z, then removing it won't reduce code size at all, because that will
2216   // just replace the CB(N)Z with a CMP.
2217   if (MI.getOpcode() == ARM::t2Bcc &&
2218       findCMPToFoldIntoCBZ(&MI, &getRegisterInfo()))
2219     return 0;
2220 
2221   unsigned Size = getInstSizeInBytes(MI);
2222 
2223   // For Thumb2, all branches are 32-bit instructions during the if conversion
2224   // pass, but may be replaced with 16-bit instructions during size reduction.
2225   // Since the branches considered by if conversion tend to be forward branches
2226   // over small basic blocks, they are very likely to be in range for the
2227   // narrow instructions, so we assume the final code size will be half what it
2228   // currently is.
2229   if (Subtarget.isThumb2())
2230     Size /= 2;
2231 
2232   return Size;
2233 }
2234 
2235 bool
2236 ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
2237                                             MachineBasicBlock &FMBB) const {
2238   // Reduce false anti-dependencies to let the target's out-of-order execution
2239   // engine do its thing.
2240   return Subtarget.isProfitableToUnpredicate();
2241 }
2242 
2243 /// getInstrPredicate - If instruction is predicated, returns its predicate
2244 /// condition, otherwise returns AL. It also returns the condition code
2245 /// register by reference.
2246 ARMCC::CondCodes llvm::getInstrPredicate(const MachineInstr &MI,
2247                                          Register &PredReg) {
2248   int PIdx = MI.findFirstPredOperandIdx();
2249   if (PIdx == -1) {
2250     PredReg = 0;
2251     return ARMCC::AL;
2252   }
2253 
2254   PredReg = MI.getOperand(PIdx+1).getReg();
2255   return (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
2256 }
2257 
2258 unsigned llvm::getMatchingCondBranchOpcode(unsigned Opc) {
2259   if (Opc == ARM::B)
2260     return ARM::Bcc;
2261   if (Opc == ARM::tB)
2262     return ARM::tBcc;
2263   if (Opc == ARM::t2B)
2264     return ARM::t2Bcc;
2265 
2266   llvm_unreachable("Unknown unconditional branch opcode!");
2267 }
2268 
2269 MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr &MI,
2270                                                        bool NewMI,
2271                                                        unsigned OpIdx1,
2272                                                        unsigned OpIdx2) const {
2273   switch (MI.getOpcode()) {
2274   case ARM::MOVCCr:
2275   case ARM::t2MOVCCr: {
2276     // MOVCC can be commuted by inverting the condition.
2277     Register PredReg;
2278     ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg);
2279     // MOVCC AL can't be inverted. Shouldn't happen.
2280     if (CC == ARMCC::AL || PredReg != ARM::CPSR)
2281       return nullptr;
2282     MachineInstr *CommutedMI =
2283         TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2284     if (!CommutedMI)
2285       return nullptr;
2286     // After swapping the MOVCC operands, also invert the condition.
2287     CommutedMI->getOperand(CommutedMI->findFirstPredOperandIdx())
2288         .setImm(ARMCC::getOppositeCondition(CC));
2289     return CommutedMI;
2290   }
2291   }
2292   return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2293 }
2294 
2295 /// Identify instructions that can be folded into a MOVCC instruction, and
2296 /// return the defining instruction.
2297 MachineInstr *
2298 ARMBaseInstrInfo::canFoldIntoMOVCC(Register Reg, const MachineRegisterInfo &MRI,
2299                                    const TargetInstrInfo *TII) const {
2300   if (!Reg.isVirtual())
2301     return nullptr;
2302   if (!MRI.hasOneNonDBGUse(Reg))
2303     return nullptr;
2304   MachineInstr *MI = MRI.getVRegDef(Reg);
2305   if (!MI)
2306     return nullptr;
2307   // Check if MI can be predicated and folded into the MOVCC.
2308   if (!isPredicable(*MI))
2309     return nullptr;
2310   // Check if MI has any non-dead defs or physreg uses. This also detects
2311   // predicated instructions which will be reading CPSR.
2312   for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 1)) {
2313     // Reject frame index operands, PEI can't handle the predicated pseudos.
2314     if (MO.isFI() || MO.isCPI() || MO.isJTI())
2315       return nullptr;
2316     if (!MO.isReg())
2317       continue;
2318     // MI can't have any tied operands, that would conflict with predication.
2319     if (MO.isTied())
2320       return nullptr;
2321     if (Register::isPhysicalRegister(MO.getReg()))
2322       return nullptr;
2323     if (MO.isDef() && !MO.isDead())
2324       return nullptr;
2325   }
2326   bool DontMoveAcrossStores = true;
2327   if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores))
2328     return nullptr;
2329   return MI;
2330 }
2331 
2332 bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr &MI,
2333                                      SmallVectorImpl<MachineOperand> &Cond,
2334                                      unsigned &TrueOp, unsigned &FalseOp,
2335                                      bool &Optimizable) const {
2336   assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
2337          "Unknown select instruction");
2338   // MOVCC operands:
2339   // 0: Def.
2340   // 1: True use.
2341   // 2: False use.
2342   // 3: Condition code.
2343   // 4: CPSR use.
2344   TrueOp = 1;
2345   FalseOp = 2;
2346   Cond.push_back(MI.getOperand(3));
2347   Cond.push_back(MI.getOperand(4));
2348   // We can always fold a def.
2349   Optimizable = true;
2350   return false;
2351 }
2352 
2353 MachineInstr *
2354 ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
2355                                  SmallPtrSetImpl<MachineInstr *> &SeenMIs,
2356                                  bool PreferFalse) const {
2357   assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
2358          "Unknown select instruction");
2359   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2360   MachineInstr *DefMI = canFoldIntoMOVCC(MI.getOperand(2).getReg(), MRI, this);
2361   bool Invert = !DefMI;
2362   if (!DefMI)
2363     DefMI = canFoldIntoMOVCC(MI.getOperand(1).getReg(), MRI, this);
2364   if (!DefMI)
2365     return nullptr;
2366 
2367   // Find new register class to use.
2368   MachineOperand FalseReg = MI.getOperand(Invert ? 2 : 1);
2369   MachineOperand TrueReg = MI.getOperand(Invert ? 1 : 2);
2370   Register DestReg = MI.getOperand(0).getReg();
2371   const TargetRegisterClass *FalseClass = MRI.getRegClass(FalseReg.getReg());
2372   const TargetRegisterClass *TrueClass = MRI.getRegClass(TrueReg.getReg());
2373   if (!MRI.constrainRegClass(DestReg, FalseClass))
2374     return nullptr;
2375   if (!MRI.constrainRegClass(DestReg, TrueClass))
2376     return nullptr;
2377 
2378   // Create a new predicated version of DefMI.
2379   // Rfalse is the first use.
2380   MachineInstrBuilder NewMI =
2381       BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), DefMI->getDesc(), DestReg);
2382 
2383   // Copy all the DefMI operands, excluding its (null) predicate.
2384   const MCInstrDesc &DefDesc = DefMI->getDesc();
2385   for (unsigned i = 1, e = DefDesc.getNumOperands();
2386        i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
2387     NewMI.add(DefMI->getOperand(i));
2388 
2389   unsigned CondCode = MI.getOperand(3).getImm();
2390   if (Invert)
2391     NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
2392   else
2393     NewMI.addImm(CondCode);
2394   NewMI.add(MI.getOperand(4));
2395 
2396   // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
2397   if (NewMI->hasOptionalDef())
2398     NewMI.add(condCodeOp());
2399 
2400   // The output register value when the predicate is false is an implicit
2401   // register operand tied to the first def.
2402   // The tie makes the register allocator ensure the FalseReg is allocated the
2403   // same register as operand 0.
2404   FalseReg.setImplicit();
2405   NewMI.add(FalseReg);
2406   NewMI->tieOperands(0, NewMI->getNumOperands() - 1);
2407 
2408   // Update SeenMIs set: register newly created MI and erase removed DefMI.
2409   SeenMIs.insert(NewMI);
2410   SeenMIs.erase(DefMI);
2411 
2412   // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
2413   // DefMI would be invalid when tranferred inside the loop.  Checking for a
2414   // loop is expensive, but at least remove kill flags if they are in different
2415   // BBs.
2416   if (DefMI->getParent() != MI.getParent())
2417     NewMI->clearKillInfo();
2418 
2419   // The caller will erase MI, but not DefMI.
2420   DefMI->eraseFromParent();
2421   return NewMI;
2422 }
2423 
2424 /// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the
2425 /// instruction is encoded with an 'S' bit is determined by the optional CPSR
2426 /// def operand.
2427 ///
2428 /// This will go away once we can teach tblgen how to set the optional CPSR def
2429 /// operand itself.
2430 struct AddSubFlagsOpcodePair {
2431   uint16_t PseudoOpc;
2432   uint16_t MachineOpc;
2433 };
2434 
2435 static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
2436   {ARM::ADDSri, ARM::ADDri},
2437   {ARM::ADDSrr, ARM::ADDrr},
2438   {ARM::ADDSrsi, ARM::ADDrsi},
2439   {ARM::ADDSrsr, ARM::ADDrsr},
2440 
2441   {ARM::SUBSri, ARM::SUBri},
2442   {ARM::SUBSrr, ARM::SUBrr},
2443   {ARM::SUBSrsi, ARM::SUBrsi},
2444   {ARM::SUBSrsr, ARM::SUBrsr},
2445 
2446   {ARM::RSBSri, ARM::RSBri},
2447   {ARM::RSBSrsi, ARM::RSBrsi},
2448   {ARM::RSBSrsr, ARM::RSBrsr},
2449 
2450   {ARM::tADDSi3, ARM::tADDi3},
2451   {ARM::tADDSi8, ARM::tADDi8},
2452   {ARM::tADDSrr, ARM::tADDrr},
2453   {ARM::tADCS, ARM::tADC},
2454 
2455   {ARM::tSUBSi3, ARM::tSUBi3},
2456   {ARM::tSUBSi8, ARM::tSUBi8},
2457   {ARM::tSUBSrr, ARM::tSUBrr},
2458   {ARM::tSBCS, ARM::tSBC},
2459   {ARM::tRSBS, ARM::tRSB},
2460   {ARM::tLSLSri, ARM::tLSLri},
2461 
2462   {ARM::t2ADDSri, ARM::t2ADDri},
2463   {ARM::t2ADDSrr, ARM::t2ADDrr},
2464   {ARM::t2ADDSrs, ARM::t2ADDrs},
2465 
2466   {ARM::t2SUBSri, ARM::t2SUBri},
2467   {ARM::t2SUBSrr, ARM::t2SUBrr},
2468   {ARM::t2SUBSrs, ARM::t2SUBrs},
2469 
2470   {ARM::t2RSBSri, ARM::t2RSBri},
2471   {ARM::t2RSBSrs, ARM::t2RSBrs},
2472 };
2473 
2474 unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
2475   for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i)
2476     if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc)
2477       return AddSubFlagsOpcodeMap[i].MachineOpc;
2478   return 0;
2479 }
2480 
2481 void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
2482                                    MachineBasicBlock::iterator &MBBI,
2483                                    const DebugLoc &dl, Register DestReg,
2484                                    Register BaseReg, int NumBytes,
2485                                    ARMCC::CondCodes Pred, Register PredReg,
2486                                    const ARMBaseInstrInfo &TII,
2487                                    unsigned MIFlags) {
2488   if (NumBytes == 0 && DestReg != BaseReg) {
2489     BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg)
2490         .addReg(BaseReg, RegState::Kill)
2491         .add(predOps(Pred, PredReg))
2492         .add(condCodeOp())
2493         .setMIFlags(MIFlags);
2494     return;
2495   }
2496 
2497   bool isSub = NumBytes < 0;
2498   if (isSub) NumBytes = -NumBytes;
2499 
2500   while (NumBytes) {
2501     unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
2502     unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
2503     assert(ThisVal && "Didn't extract field correctly");
2504 
2505     // We will handle these bits from offset, clear them.
2506     NumBytes &= ~ThisVal;
2507 
2508     assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");
2509 
2510     // Build the new ADD / SUB.
2511     unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2512     BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
2513         .addReg(BaseReg, RegState::Kill)
2514         .addImm(ThisVal)
2515         .add(predOps(Pred, PredReg))
2516         .add(condCodeOp())
2517         .setMIFlags(MIFlags);
2518     BaseReg = DestReg;
2519   }
2520 }
2521 
2522 bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
2523                                       MachineFunction &MF, MachineInstr *MI,
2524                                       unsigned NumBytes) {
2525   // This optimisation potentially adds lots of load and store
2526   // micro-operations, it's only really a great benefit to code-size.
2527   if (!Subtarget.hasMinSize())
2528     return false;
2529 
2530   // If only one register is pushed/popped, LLVM can use an LDR/STR
2531   // instead. We can't modify those so make sure we're dealing with an
2532   // instruction we understand.
2533   bool IsPop = isPopOpcode(MI->getOpcode());
2534   bool IsPush = isPushOpcode(MI->getOpcode());
2535   if (!IsPush && !IsPop)
2536     return false;
2537 
2538   bool IsVFPPushPop = MI->getOpcode() == ARM::VSTMDDB_UPD ||
2539                       MI->getOpcode() == ARM::VLDMDIA_UPD;
2540   bool IsT1PushPop = MI->getOpcode() == ARM::tPUSH ||
2541                      MI->getOpcode() == ARM::tPOP ||
2542                      MI->getOpcode() == ARM::tPOP_RET;
2543 
2544   assert((IsT1PushPop || (MI->getOperand(0).getReg() == ARM::SP &&
2545                           MI->getOperand(1).getReg() == ARM::SP)) &&
2546          "trying to fold sp update into non-sp-updating push/pop");
2547 
2548   // The VFP push & pop act on D-registers, so we can only fold an adjustment
2549   // by a multiple of 8 bytes in correctly. Similarly rN is 4-bytes. Don't try
2550   // if this is violated.
2551   if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2552     return false;
2553 
2554   // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
2555   // pred) so the list starts at 4. Thumb1 starts after the predicate.
2556   int RegListIdx = IsT1PushPop ? 2 : 4;
2557 
2558   // Calculate the space we'll need in terms of registers.
2559   unsigned RegsNeeded;
2560   const TargetRegisterClass *RegClass;
2561   if (IsVFPPushPop) {
2562     RegsNeeded = NumBytes / 8;
2563     RegClass = &ARM::DPRRegClass;
2564   } else {
2565     RegsNeeded = NumBytes / 4;
2566     RegClass = &ARM::GPRRegClass;
2567   }
2568 
2569   // We're going to have to strip all list operands off before
2570   // re-adding them since the order matters, so save the existing ones
2571   // for later.
2572   SmallVector<MachineOperand, 4> RegList;
2573 
2574   // We're also going to need the first register transferred by this
2575   // instruction, which won't necessarily be the first register in the list.
2576   unsigned FirstRegEnc = -1;
2577 
2578   const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo();
2579   for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2580     MachineOperand &MO = MI->getOperand(i);
2581     RegList.push_back(MO);
2582 
2583     if (MO.isReg() && !MO.isImplicit() &&
2584         TRI->getEncodingValue(MO.getReg()) < FirstRegEnc)
2585       FirstRegEnc = TRI->getEncodingValue(MO.getReg());
2586   }
2587 
2588   const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
2589 
2590   // Now try to find enough space in the reglist to allocate NumBytes.
2591   for (int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2592        --CurRegEnc) {
2593     unsigned CurReg = RegClass->getRegister(CurRegEnc);
2594     if (IsT1PushPop && CurRegEnc > TRI->getEncodingValue(ARM::R7))
2595       continue;
2596     if (!IsPop) {
2597       // Pushing any register is completely harmless, mark the register involved
2598       // as undef since we don't care about its value and must not restore it
2599       // during stack unwinding.
2600       RegList.push_back(MachineOperand::CreateReg(CurReg, false, false,
2601                                                   false, false, true));
2602       --RegsNeeded;
2603       continue;
2604     }
2605 
2606     // However, we can only pop an extra register if it's not live. For
2607     // registers live within the function we might clobber a return value
2608     // register; the other way a register can be live here is if it's
2609     // callee-saved.
2610     if (isCalleeSavedRegister(CurReg, CSRegs) ||
2611         MI->getParent()->computeRegisterLiveness(TRI, CurReg, MI) !=
2612         MachineBasicBlock::LQR_Dead) {
2613       // VFP pops don't allow holes in the register list, so any skip is fatal
2614       // for our transformation. GPR pops do, so we should just keep looking.
2615       if (IsVFPPushPop)
2616         return false;
2617       else
2618         continue;
2619     }
2620 
2621     // Mark the unimportant registers as <def,dead> in the POP.
2622     RegList.push_back(MachineOperand::CreateReg(CurReg, true, false, false,
2623                                                 true));
2624     --RegsNeeded;
2625   }
2626 
2627   if (RegsNeeded > 0)
2628     return false;
2629 
2630   // Finally we know we can profitably perform the optimisation so go
2631   // ahead: strip all existing registers off and add them back again
2632   // in the right order.
2633   for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
2634     MI->RemoveOperand(i);
2635 
2636   // Add the complete list back in.
2637   MachineInstrBuilder MIB(MF, &*MI);
2638   for (int i = RegList.size() - 1; i >= 0; --i)
2639     MIB.add(RegList[i]);
2640 
2641   return true;
2642 }
2643 
2644 bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
2645                                 Register FrameReg, int &Offset,
2646                                 const ARMBaseInstrInfo &TII) {
2647   unsigned Opcode = MI.getOpcode();
2648   const MCInstrDesc &Desc = MI.getDesc();
2649   unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
2650   bool isSub = false;
2651 
2652   // Memory operands in inline assembly always use AddrMode2.
2653   if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2654     AddrMode = ARMII::AddrMode2;
2655 
2656   if (Opcode == ARM::ADDri) {
2657     Offset += MI.getOperand(FrameRegIdx+1).getImm();
2658     if (Offset == 0) {
2659       // Turn it into a move.
2660       MI.setDesc(TII.get(ARM::MOVr));
2661       MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2662       MI.RemoveOperand(FrameRegIdx+1);
2663       Offset = 0;
2664       return true;
2665     } else if (Offset < 0) {
2666       Offset = -Offset;
2667       isSub = true;
2668       MI.setDesc(TII.get(ARM::SUBri));
2669     }
2670 
2671     // Common case: small offset, fits into instruction.
2672     if (ARM_AM::getSOImmVal(Offset) != -1) {
2673       // Replace the FrameIndex with sp / fp
2674       MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2675       MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
2676       Offset = 0;
2677       return true;
2678     }
2679 
2680     // Otherwise, pull as much of the immedidate into this ADDri/SUBri
2681     // as possible.
2682     unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
2683     unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);
2684 
2685     // We will handle these bits from offset, clear them.
2686     Offset &= ~ThisImmVal;
2687 
2688     // Get the properly encoded SOImmVal field.
2689     assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
2690            "Bit extraction didn't work?");
2691     MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2692  } else {
2693     unsigned ImmIdx = 0;
2694     int InstrOffs = 0;
2695     unsigned NumBits = 0;
2696     unsigned Scale = 1;
2697     switch (AddrMode) {
2698     case ARMII::AddrMode_i12:
2699       ImmIdx = FrameRegIdx + 1;
2700       InstrOffs = MI.getOperand(ImmIdx).getImm();
2701       NumBits = 12;
2702       break;
2703     case ARMII::AddrMode2:
2704       ImmIdx = FrameRegIdx+2;
2705       InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
2706       if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2707         InstrOffs *= -1;
2708       NumBits = 12;
2709       break;
2710     case ARMII::AddrMode3:
2711       ImmIdx = FrameRegIdx+2;
2712       InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
2713       if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2714         InstrOffs *= -1;
2715       NumBits = 8;
2716       break;
2717     case ARMII::AddrMode4:
2718     case ARMII::AddrMode6:
2719       // Can't fold any offset even if it's zero.
2720       return false;
2721     case ARMII::AddrMode5:
2722       ImmIdx = FrameRegIdx+1;
2723       InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
2724       if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2725         InstrOffs *= -1;
2726       NumBits = 8;
2727       Scale = 4;
2728       break;
2729     case ARMII::AddrMode5FP16:
2730       ImmIdx = FrameRegIdx+1;
2731       InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
2732       if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2733         InstrOffs *= -1;
2734       NumBits = 8;
2735       Scale = 2;
2736       break;
2737     case ARMII::AddrModeT2_i7:
2738     case ARMII::AddrModeT2_i7s2:
2739     case ARMII::AddrModeT2_i7s4:
2740       ImmIdx = FrameRegIdx+1;
2741       InstrOffs = MI.getOperand(ImmIdx).getImm();
2742       NumBits = 7;
2743       Scale = (AddrMode == ARMII::AddrModeT2_i7s2 ? 2 :
2744                AddrMode == ARMII::AddrModeT2_i7s4 ? 4 : 1);
2745       break;
2746     default:
2747       llvm_unreachable("Unsupported addressing mode!");
2748     }
2749 
2750     Offset += InstrOffs * Scale;
2751     assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
2752     if (Offset < 0) {
2753       Offset = -Offset;
2754       isSub = true;
2755     }
2756 
2757     // Attempt to fold address comp. if opcode has offset bits
2758     if (NumBits > 0) {
2759       // Common case: small offset, fits into instruction.
2760       MachineOperand &ImmOp = MI.getOperand(ImmIdx);
2761       int ImmedOffset = Offset / Scale;
2762       unsigned Mask = (1 << NumBits) - 1;
2763       if ((unsigned)Offset <= Mask * Scale) {
2764         // Replace the FrameIndex with sp
2765         MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2766         // FIXME: When addrmode2 goes away, this will simplify (like the
2767         // T2 version), as the LDR.i12 versions don't need the encoding
2768         // tricks for the offset value.
2769         if (isSub) {
2770           if (AddrMode == ARMII::AddrMode_i12)
2771             ImmedOffset = -ImmedOffset;
2772           else
2773             ImmedOffset |= 1 << NumBits;
2774         }
2775         ImmOp.ChangeToImmediate(ImmedOffset);
2776         Offset = 0;
2777         return true;
2778       }
2779 
2780       // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
2781       ImmedOffset = ImmedOffset & Mask;
2782       if (isSub) {
2783         if (AddrMode == ARMII::AddrMode_i12)
2784           ImmedOffset = -ImmedOffset;
2785         else
2786           ImmedOffset |= 1 << NumBits;
2787       }
2788       ImmOp.ChangeToImmediate(ImmedOffset);
2789       Offset &= ~(Mask*Scale);
2790     }
2791   }
2792 
2793   Offset = (isSub) ? -Offset : Offset;
2794   return Offset == 0;
2795 }
2796 
2797 /// analyzeCompare - For a comparison instruction, return the source registers
2798 /// in SrcReg and SrcReg2 if having two register operands, and the value it
2799 /// compares against in CmpValue. Return true if the comparison instruction
2800 /// can be analyzed.
2801 bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
2802                                       Register &SrcReg2, int64_t &CmpMask,
2803                                       int64_t &CmpValue) const {
2804   switch (MI.getOpcode()) {
2805   default: break;
2806   case ARM::CMPri:
2807   case ARM::t2CMPri:
2808   case ARM::tCMPi8:
2809     SrcReg = MI.getOperand(0).getReg();
2810     SrcReg2 = 0;
2811     CmpMask = ~0;
2812     CmpValue = MI.getOperand(1).getImm();
2813     return true;
2814   case ARM::CMPrr:
2815   case ARM::t2CMPrr:
2816   case ARM::tCMPr:
2817     SrcReg = MI.getOperand(0).getReg();
2818     SrcReg2 = MI.getOperand(1).getReg();
2819     CmpMask = ~0;
2820     CmpValue = 0;
2821     return true;
2822   case ARM::TSTri:
2823   case ARM::t2TSTri:
2824     SrcReg = MI.getOperand(0).getReg();
2825     SrcReg2 = 0;
2826     CmpMask = MI.getOperand(1).getImm();
2827     CmpValue = 0;
2828     return true;
2829   }
2830 
2831   return false;
2832 }
2833 
2834 /// isSuitableForMask - Identify a suitable 'and' instruction that
2835 /// operates on the given source register and applies the same mask
2836 /// as a 'tst' instruction. Provide a limited look-through for copies.
2837 /// When successful, MI will hold the found instruction.
2838 static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg,
2839                               int CmpMask, bool CommonUse) {
2840   switch (MI->getOpcode()) {
2841     case ARM::ANDri:
2842     case ARM::t2ANDri:
2843       if (CmpMask != MI->getOperand(2).getImm())
2844         return false;
2845       if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
2846         return true;
2847       break;
2848   }
2849 
2850   return false;
2851 }
2852 
2853 /// getCmpToAddCondition - assume the flags are set by CMP(a,b), return
2854 /// the condition code if we modify the instructions such that flags are
2855 /// set by ADD(a,b,X).
2856 inline static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC) {
2857   switch (CC) {
2858   default: return ARMCC::AL;
2859   case ARMCC::HS: return ARMCC::LO;
2860   case ARMCC::LO: return ARMCC::HS;
2861   case ARMCC::VS: return ARMCC::VS;
2862   case ARMCC::VC: return ARMCC::VC;
2863   }
2864 }
2865 
2866 /// isRedundantFlagInstr - check whether the first instruction, whose only
2867 /// purpose is to update flags, can be made redundant.
2868 /// CMPrr can be made redundant by SUBrr if the operands are the same.
2869 /// CMPri can be made redundant by SUBri if the operands are the same.
2870 /// CMPrr(r0, r1) can be made redundant by ADDr[ri](r0, r1, X).
2871 /// This function can be extended later on.
2872 inline static bool isRedundantFlagInstr(const MachineInstr *CmpI,
2873                                         Register SrcReg, Register SrcReg2,
2874                                         int64_t ImmValue,
2875                                         const MachineInstr *OI,
2876                                         bool &IsThumb1) {
2877   if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) &&
2878       (OI->getOpcode() == ARM::SUBrr || OI->getOpcode() == ARM::t2SUBrr) &&
2879       ((OI->getOperand(1).getReg() == SrcReg &&
2880         OI->getOperand(2).getReg() == SrcReg2) ||
2881        (OI->getOperand(1).getReg() == SrcReg2 &&
2882         OI->getOperand(2).getReg() == SrcReg))) {
2883     IsThumb1 = false;
2884     return true;
2885   }
2886 
2887   if (CmpI->getOpcode() == ARM::tCMPr && OI->getOpcode() == ARM::tSUBrr &&
2888       ((OI->getOperand(2).getReg() == SrcReg &&
2889         OI->getOperand(3).getReg() == SrcReg2) ||
2890        (OI->getOperand(2).getReg() == SrcReg2 &&
2891         OI->getOperand(3).getReg() == SrcReg))) {
2892     IsThumb1 = true;
2893     return true;
2894   }
2895 
2896   if ((CmpI->getOpcode() == ARM::CMPri || CmpI->getOpcode() == ARM::t2CMPri) &&
2897       (OI->getOpcode() == ARM::SUBri || OI->getOpcode() == ARM::t2SUBri) &&
2898       OI->getOperand(1).getReg() == SrcReg &&
2899       OI->getOperand(2).getImm() == ImmValue) {
2900     IsThumb1 = false;
2901     return true;
2902   }
2903 
2904   if (CmpI->getOpcode() == ARM::tCMPi8 &&
2905       (OI->getOpcode() == ARM::tSUBi8 || OI->getOpcode() == ARM::tSUBi3) &&
2906       OI->getOperand(2).getReg() == SrcReg &&
2907       OI->getOperand(3).getImm() == ImmValue) {
2908     IsThumb1 = true;
2909     return true;
2910   }
2911 
2912   if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) &&
2913       (OI->getOpcode() == ARM::ADDrr || OI->getOpcode() == ARM::t2ADDrr ||
2914        OI->getOpcode() == ARM::ADDri || OI->getOpcode() == ARM::t2ADDri) &&
2915       OI->getOperand(0).isReg() && OI->getOperand(1).isReg() &&
2916       OI->getOperand(0).getReg() == SrcReg &&
2917       OI->getOperand(1).getReg() == SrcReg2) {
2918     IsThumb1 = false;
2919     return true;
2920   }
2921 
2922   if (CmpI->getOpcode() == ARM::tCMPr &&
2923       (OI->getOpcode() == ARM::tADDi3 || OI->getOpcode() == ARM::tADDi8 ||
2924        OI->getOpcode() == ARM::tADDrr) &&
2925       OI->getOperand(0).getReg() == SrcReg &&
2926       OI->getOperand(2).getReg() == SrcReg2) {
2927     IsThumb1 = true;
2928     return true;
2929   }
2930 
2931   return false;
2932 }
2933 
2934 static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1) {
2935   switch (MI->getOpcode()) {
2936   default: return false;
2937   case ARM::tLSLri:
2938   case ARM::tLSRri:
2939   case ARM::tLSLrr:
2940   case ARM::tLSRrr:
2941   case ARM::tSUBrr:
2942   case ARM::tADDrr:
2943   case ARM::tADDi3:
2944   case ARM::tADDi8:
2945   case ARM::tSUBi3:
2946   case ARM::tSUBi8:
2947   case ARM::tMUL:
2948   case ARM::tADC:
2949   case ARM::tSBC:
2950   case ARM::tRSB:
2951   case ARM::tAND:
2952   case ARM::tORR:
2953   case ARM::tEOR:
2954   case ARM::tBIC:
2955   case ARM::tMVN:
2956   case ARM::tASRri:
2957   case ARM::tASRrr:
2958   case ARM::tROR:
2959     IsThumb1 = true;
2960     LLVM_FALLTHROUGH;
2961   case ARM::RSBrr:
2962   case ARM::RSBri:
2963   case ARM::RSCrr:
2964   case ARM::RSCri:
2965   case ARM::ADDrr:
2966   case ARM::ADDri:
2967   case ARM::ADCrr:
2968   case ARM::ADCri:
2969   case ARM::SUBrr:
2970   case ARM::SUBri:
2971   case ARM::SBCrr:
2972   case ARM::SBCri:
2973   case ARM::t2RSBri:
2974   case ARM::t2ADDrr:
2975   case ARM::t2ADDri:
2976   case ARM::t2ADCrr:
2977   case ARM::t2ADCri:
2978   case ARM::t2SUBrr:
2979   case ARM::t2SUBri:
2980   case ARM::t2SBCrr:
2981   case ARM::t2SBCri:
2982   case ARM::ANDrr:
2983   case ARM::ANDri:
2984   case ARM::t2ANDrr:
2985   case ARM::t2ANDri:
2986   case ARM::ORRrr:
2987   case ARM::ORRri:
2988   case ARM::t2ORRrr:
2989   case ARM::t2ORRri:
2990   case ARM::EORrr:
2991   case ARM::EORri:
2992   case ARM::t2EORrr:
2993   case ARM::t2EORri:
2994   case ARM::t2LSRri:
2995   case ARM::t2LSRrr:
2996   case ARM::t2LSLri:
2997   case ARM::t2LSLrr:
2998     return true;
2999   }
3000 }
3001 
3002 /// optimizeCompareInstr - Convert the instruction supplying the argument to the
3003 /// comparison into one that sets the zero bit in the flags register;
3004 /// Remove a redundant Compare instruction if an earlier instruction can set the
3005 /// flags in the same way as Compare.
3006 /// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two
3007 /// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the
3008 /// condition code of instructions which use the flags.
3009 bool ARMBaseInstrInfo::optimizeCompareInstr(
3010     MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask,
3011     int64_t CmpValue, const MachineRegisterInfo *MRI) const {
3012   // Get the unique definition of SrcReg.
3013   MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
3014   if (!MI) return false;
3015 
3016   // Masked compares sometimes use the same register as the corresponding 'and'.
3017   if (CmpMask != ~0) {
3018     if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(*MI)) {
3019       MI = nullptr;
3020       for (MachineRegisterInfo::use_instr_iterator
3021            UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end();
3022            UI != UE; ++UI) {
3023         if (UI->getParent() != CmpInstr.getParent())
3024           continue;
3025         MachineInstr *PotentialAND = &*UI;
3026         if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) ||
3027             isPredicated(*PotentialAND))
3028           continue;
3029         MI = PotentialAND;
3030         break;
3031       }
3032       if (!MI) return false;
3033     }
3034   }
3035 
3036   // Get ready to iterate backward from CmpInstr.
3037   MachineBasicBlock::iterator I = CmpInstr, E = MI,
3038                               B = CmpInstr.getParent()->begin();
3039 
3040   // Early exit if CmpInstr is at the beginning of the BB.
3041   if (I == B) return false;
3042 
3043   // There are two possible candidates which can be changed to set CPSR:
3044   // One is MI, the other is a SUB or ADD instruction.
3045   // For CMPrr(r1,r2), we are looking for SUB(r1,r2), SUB(r2,r1), or
3046   // ADDr[ri](r1, r2, X).
3047   // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue).
3048   MachineInstr *SubAdd = nullptr;
3049   if (SrcReg2 != 0)
3050     // MI is not a candidate for CMPrr.
3051     MI = nullptr;
3052   else if (MI->getParent() != CmpInstr.getParent() || CmpValue != 0) {
3053     // Conservatively refuse to convert an instruction which isn't in the same
3054     // BB as the comparison.
3055     // For CMPri w/ CmpValue != 0, a SubAdd may still be a candidate.
3056     // Thus we cannot return here.
3057     if (CmpInstr.getOpcode() == ARM::CMPri ||
3058         CmpInstr.getOpcode() == ARM::t2CMPri ||
3059         CmpInstr.getOpcode() == ARM::tCMPi8)
3060       MI = nullptr;
3061     else
3062       return false;
3063   }
3064 
3065   bool IsThumb1 = false;
3066   if (MI && !isOptimizeCompareCandidate(MI, IsThumb1))
3067     return false;
3068 
3069   // We also want to do this peephole for cases like this: if (a*b == 0),
3070   // and optimise away the CMP instruction from the generated code sequence:
3071   // MULS, MOVS, MOVS, CMP. Here the MOVS instructions load the boolean values
3072   // resulting from the select instruction, but these MOVS instructions for
3073   // Thumb1 (V6M) are flag setting and are thus preventing this optimisation.
3074   // However, if we only have MOVS instructions in between the CMP and the
3075   // other instruction (the MULS in this example), then the CPSR is dead so we
3076   // can safely reorder the sequence into: MOVS, MOVS, MULS, CMP. We do this
3077   // reordering and then continue the analysis hoping we can eliminate the
3078   // CMP. This peephole works on the vregs, so is still in SSA form. As a
3079   // consequence, the movs won't redefine/kill the MUL operands which would
3080   // make this reordering illegal.
3081   const TargetRegisterInfo *TRI = &getRegisterInfo();
3082   if (MI && IsThumb1) {
3083     --I;
3084     if (I != E && !MI->readsRegister(ARM::CPSR, TRI)) {
3085       bool CanReorder = true;
3086       for (; I != E; --I) {
3087         if (I->getOpcode() != ARM::tMOVi8) {
3088           CanReorder = false;
3089           break;
3090         }
3091       }
3092       if (CanReorder) {
3093         MI = MI->removeFromParent();
3094         E = CmpInstr;
3095         CmpInstr.getParent()->insert(E, MI);
3096       }
3097     }
3098     I = CmpInstr;
3099     E = MI;
3100   }
3101 
3102   // Check that CPSR isn't set between the comparison instruction and the one we
3103   // want to change. At the same time, search for SubAdd.
3104   bool SubAddIsThumb1 = false;
3105   do {
3106     const MachineInstr &Instr = *--I;
3107 
3108     // Check whether CmpInstr can be made redundant by the current instruction.
3109     if (isRedundantFlagInstr(&CmpInstr, SrcReg, SrcReg2, CmpValue, &Instr,
3110                              SubAddIsThumb1)) {
3111       SubAdd = &*I;
3112       break;
3113     }
3114 
3115     // Allow E (which was initially MI) to be SubAdd but do not search before E.
3116     if (I == E)
3117       break;
3118 
3119     if (Instr.modifiesRegister(ARM::CPSR, TRI) ||
3120         Instr.readsRegister(ARM::CPSR, TRI))
3121       // This instruction modifies or uses CPSR after the one we want to
3122       // change. We can't do this transformation.
3123       return false;
3124 
3125     if (I == B) {
3126       // In some cases, we scan the use-list of an instruction for an AND;
3127       // that AND is in the same BB, but may not be scheduled before the
3128       // corresponding TST.  In that case, bail out.
3129       //
3130       // FIXME: We could try to reschedule the AND.
3131       return false;
3132     }
3133   } while (true);
3134 
3135   // Return false if no candidates exist.
3136   if (!MI && !SubAdd)
3137     return false;
3138 
3139   // If we found a SubAdd, use it as it will be closer to the CMP
3140   if (SubAdd) {
3141     MI = SubAdd;
3142     IsThumb1 = SubAddIsThumb1;
3143   }
3144 
3145   // We can't use a predicated instruction - it doesn't always write the flags.
3146   if (isPredicated(*MI))
3147     return false;
3148 
3149   // Scan forward for the use of CPSR
3150   // When checking against MI: if it's a conditional code that requires
3151   // checking of the V bit or C bit, then this is not safe to do.
3152   // It is safe to remove CmpInstr if CPSR is redefined or killed.
3153   // If we are done with the basic block, we need to check whether CPSR is
3154   // live-out.
3155   SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4>
3156       OperandsToUpdate;
3157   bool isSafe = false;
3158   I = CmpInstr;
3159   E = CmpInstr.getParent()->end();
3160   while (!isSafe && ++I != E) {
3161     const MachineInstr &Instr = *I;
3162     for (unsigned IO = 0, EO = Instr.getNumOperands();
3163          !isSafe && IO != EO; ++IO) {
3164       const MachineOperand &MO = Instr.getOperand(IO);
3165       if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) {
3166         isSafe = true;
3167         break;
3168       }
3169       if (!MO.isReg() || MO.getReg() != ARM::CPSR)
3170         continue;
3171       if (MO.isDef()) {
3172         isSafe = true;
3173         break;
3174       }
3175       // Condition code is after the operand before CPSR except for VSELs.
3176       ARMCC::CondCodes CC;
3177       bool IsInstrVSel = true;
3178       switch (Instr.getOpcode()) {
3179       default:
3180         IsInstrVSel = false;
3181         CC = (ARMCC::CondCodes)Instr.getOperand(IO - 1).getImm();
3182         break;
3183       case ARM::VSELEQD:
3184       case ARM::VSELEQS:
3185       case ARM::VSELEQH:
3186         CC = ARMCC::EQ;
3187         break;
3188       case ARM::VSELGTD:
3189       case ARM::VSELGTS:
3190       case ARM::VSELGTH:
3191         CC = ARMCC::GT;
3192         break;
3193       case ARM::VSELGED:
3194       case ARM::VSELGES:
3195       case ARM::VSELGEH:
3196         CC = ARMCC::GE;
3197         break;
3198       case ARM::VSELVSD:
3199       case ARM::VSELVSS:
3200       case ARM::VSELVSH:
3201         CC = ARMCC::VS;
3202         break;
3203       }
3204 
3205       if (SubAdd) {
3206         // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based
3207         // on CMP needs to be updated to be based on SUB.
3208         // If we have ADD(r1, r2, X) and CMP(r1, r2), the condition code also
3209         // needs to be modified.
3210         // Push the condition code operands to OperandsToUpdate.
3211         // If it is safe to remove CmpInstr, the condition code of these
3212         // operands will be modified.
3213         unsigned Opc = SubAdd->getOpcode();
3214         bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr ||
3215                      Opc == ARM::SUBri || Opc == ARM::t2SUBri ||
3216                      Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 ||
3217                      Opc == ARM::tSUBi8;
3218         unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2;
3219         if (!IsSub ||
3220             (SrcReg2 != 0 && SubAdd->getOperand(OpI).getReg() == SrcReg2 &&
3221              SubAdd->getOperand(OpI + 1).getReg() == SrcReg)) {
3222           // VSel doesn't support condition code update.
3223           if (IsInstrVSel)
3224             return false;
3225           // Ensure we can swap the condition.
3226           ARMCC::CondCodes NewCC = (IsSub ? getSwappedCondition(CC) : getCmpToAddCondition(CC));
3227           if (NewCC == ARMCC::AL)
3228             return false;
3229           OperandsToUpdate.push_back(
3230               std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3231         }
3232       } else {
3233         // No SubAdd, so this is x = <op> y, z; cmp x, 0.
3234         switch (CC) {
3235         case ARMCC::EQ: // Z
3236         case ARMCC::NE: // Z
3237         case ARMCC::MI: // N
3238         case ARMCC::PL: // N
3239         case ARMCC::AL: // none
3240           // CPSR can be used multiple times, we should continue.
3241           break;
3242         case ARMCC::HS: // C
3243         case ARMCC::LO: // C
3244         case ARMCC::VS: // V
3245         case ARMCC::VC: // V
3246         case ARMCC::HI: // C Z
3247         case ARMCC::LS: // C Z
3248         case ARMCC::GE: // N V
3249         case ARMCC::LT: // N V
3250         case ARMCC::GT: // Z N V
3251         case ARMCC::LE: // Z N V
3252           // The instruction uses the V bit or C bit which is not safe.
3253           return false;
3254         }
3255       }
3256     }
3257   }
3258 
3259   // If CPSR is not killed nor re-defined, we should check whether it is
3260   // live-out. If it is live-out, do not optimize.
3261   if (!isSafe) {
3262     MachineBasicBlock *MBB = CmpInstr.getParent();
3263     for (MachineBasicBlock *Succ : MBB->successors())
3264       if (Succ->isLiveIn(ARM::CPSR))
3265         return false;
3266   }
3267 
3268   // Toggle the optional operand to CPSR (if it exists - in Thumb1 we always
3269   // set CPSR so this is represented as an explicit output)
3270   if (!IsThumb1) {
3271     MI->getOperand(5).setReg(ARM::CPSR);
3272     MI->getOperand(5).setIsDef(true);
3273   }
3274   assert(!isPredicated(*MI) && "Can't use flags from predicated instruction");
3275   CmpInstr.eraseFromParent();
3276 
3277   // Modify the condition code of operands in OperandsToUpdate.
3278   // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
3279   // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
3280   for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++)
3281     OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);
3282 
3283   MI->clearRegisterDeads(ARM::CPSR);
3284 
3285   return true;
3286 }
3287 
3288 bool ARMBaseInstrInfo::shouldSink(const MachineInstr &MI) const {
3289   // Do not sink MI if it might be used to optimize a redundant compare.
3290   // We heuristically only look at the instruction immediately following MI to
3291   // avoid potentially searching the entire basic block.
3292   if (isPredicated(MI))
3293     return true;
3294   MachineBasicBlock::const_iterator Next = &MI;
3295   ++Next;
3296   Register SrcReg, SrcReg2;
3297   int64_t CmpMask, CmpValue;
3298   bool IsThumb1;
3299   if (Next != MI.getParent()->end() &&
3300       analyzeCompare(*Next, SrcReg, SrcReg2, CmpMask, CmpValue) &&
3301       isRedundantFlagInstr(&*Next, SrcReg, SrcReg2, CmpValue, &MI, IsThumb1))
3302     return false;
3303   return true;
3304 }
3305 
3306 bool ARMBaseInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
3307                                      Register Reg,
3308                                      MachineRegisterInfo *MRI) const {
3309   // Fold large immediates into add, sub, or, xor.
3310   unsigned DefOpc = DefMI.getOpcode();
3311   if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
3312     return false;
3313   if (!DefMI.getOperand(1).isImm())
3314     // Could be t2MOVi32imm @xx
3315     return false;
3316 
3317   if (!MRI->hasOneNonDBGUse(Reg))
3318     return false;
3319 
3320   const MCInstrDesc &DefMCID = DefMI.getDesc();
3321   if (DefMCID.hasOptionalDef()) {
3322     unsigned NumOps = DefMCID.getNumOperands();
3323     const MachineOperand &MO = DefMI.getOperand(NumOps - 1);
3324     if (MO.getReg() == ARM::CPSR && !MO.isDead())
3325       // If DefMI defines CPSR and it is not dead, it's obviously not safe
3326       // to delete DefMI.
3327       return false;
3328   }
3329 
3330   const MCInstrDesc &UseMCID = UseMI.getDesc();
3331   if (UseMCID.hasOptionalDef()) {
3332     unsigned NumOps = UseMCID.getNumOperands();
3333     if (UseMI.getOperand(NumOps - 1).getReg() == ARM::CPSR)
3334       // If the instruction sets the flag, do not attempt this optimization
3335       // since it may change the semantics of the code.
3336       return false;
3337   }
3338 
3339   unsigned UseOpc = UseMI.getOpcode();
3340   unsigned NewUseOpc = 0;
3341   uint32_t ImmVal = (uint32_t)DefMI.getOperand(1).getImm();
3342   uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3343   bool Commute = false;
3344   switch (UseOpc) {
3345   default: return false;
3346   case ARM::SUBrr:
3347   case ARM::ADDrr:
3348   case ARM::ORRrr:
3349   case ARM::EORrr:
3350   case ARM::t2SUBrr:
3351   case ARM::t2ADDrr:
3352   case ARM::t2ORRrr:
3353   case ARM::t2EORrr: {
3354     Commute = UseMI.getOperand(2).getReg() != Reg;
3355     switch (UseOpc) {
3356     default: break;
3357     case ARM::ADDrr:
3358     case ARM::SUBrr:
3359       if (UseOpc == ARM::SUBrr && Commute)
3360         return false;
3361 
3362       // ADD/SUB are special because they're essentially the same operation, so
3363       // we can handle a larger range of immediates.
3364       if (ARM_AM::isSOImmTwoPartVal(ImmVal))
3365         NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3366       else if (ARM_AM::isSOImmTwoPartVal(-ImmVal)) {
3367         ImmVal = -ImmVal;
3368         NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3369       } else
3370         return false;
3371       SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
3372       SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
3373       break;
3374     case ARM::ORRrr:
3375     case ARM::EORrr:
3376       if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
3377         return false;
3378       SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
3379       SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
3380       switch (UseOpc) {
3381       default: break;
3382       case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
3383       case ARM::EORrr: NewUseOpc = ARM::EORri; break;
3384       }
3385       break;
3386     case ARM::t2ADDrr:
3387     case ARM::t2SUBrr: {
3388       if (UseOpc == ARM::t2SUBrr && Commute)
3389         return false;
3390 
3391       // ADD/SUB are special because they're essentially the same operation, so
3392       // we can handle a larger range of immediates.
3393       const bool ToSP = DefMI.getOperand(0).getReg() == ARM::SP;
3394       const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3395       const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3396       if (ARM_AM::isT2SOImmTwoPartVal(ImmVal))
3397         NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3398       else if (ARM_AM::isT2SOImmTwoPartVal(-ImmVal)) {
3399         ImmVal = -ImmVal;
3400         NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3401       } else
3402         return false;
3403       SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
3404       SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
3405       break;
3406     }
3407     case ARM::t2ORRrr:
3408     case ARM::t2EORrr:
3409       if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
3410         return false;
3411       SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
3412       SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
3413       switch (UseOpc) {
3414       default: break;
3415       case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
3416       case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
3417       }
3418       break;
3419     }
3420   }
3421   }
3422 
3423   unsigned OpIdx = Commute ? 2 : 1;
3424   Register Reg1 = UseMI.getOperand(OpIdx).getReg();
3425   bool isKill = UseMI.getOperand(OpIdx).isKill();
3426   const TargetRegisterClass *TRC = MRI->getRegClass(Reg);
3427   Register NewReg = MRI->createVirtualRegister(TRC);
3428   BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), get(NewUseOpc),
3429           NewReg)
3430       .addReg(Reg1, getKillRegState(isKill))
3431       .addImm(SOImmValV1)
3432       .add(predOps(ARMCC::AL))
3433       .add(condCodeOp());
3434   UseMI.setDesc(get(NewUseOpc));
3435   UseMI.getOperand(1).setReg(NewReg);
3436   UseMI.getOperand(1).setIsKill();
3437   UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3438   DefMI.eraseFromParent();
3439   // FIXME: t2ADDrr should be split, as different rulles apply when writing to SP.
3440   // Just as t2ADDri, that was split to [t2ADDri, t2ADDspImm].
3441   // Then the below code will not be needed, as the input/output register
3442   // classes will be rgpr or gprSP.
3443   // For now, we fix the UseMI operand explicitly here:
3444   switch(NewUseOpc){
3445     case ARM::t2ADDspImm:
3446     case ARM::t2SUBspImm:
3447     case ARM::t2ADDri:
3448     case ARM::t2SUBri:
3449       MRI->constrainRegClass(UseMI.getOperand(0).getReg(), TRC);
3450   }
3451   return true;
3452 }
3453 
3454 static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
3455                                         const MachineInstr &MI) {
3456   switch (MI.getOpcode()) {
3457   default: {
3458     const MCInstrDesc &Desc = MI.getDesc();
3459     int UOps = ItinData->getNumMicroOps(Desc.getSchedClass());
3460     assert(UOps >= 0 && "bad # UOps");
3461     return UOps;
3462   }
3463 
3464   case ARM::LDRrs:
3465   case ARM::LDRBrs:
3466   case ARM::STRrs:
3467   case ARM::STRBrs: {
3468     unsigned ShOpVal = MI.getOperand(3).getImm();
3469     bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3470     unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3471     if (!isSub &&
3472         (ShImm == 0 ||
3473          ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3474           ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3475       return 1;
3476     return 2;
3477   }
3478 
3479   case ARM::LDRH:
3480   case ARM::STRH: {
3481     if (!MI.getOperand(2).getReg())
3482       return 1;
3483 
3484     unsigned ShOpVal = MI.getOperand(3).getImm();
3485     bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3486     unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3487     if (!isSub &&
3488         (ShImm == 0 ||
3489          ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3490           ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3491       return 1;
3492     return 2;
3493   }
3494 
3495   case ARM::LDRSB:
3496   case ARM::LDRSH:
3497     return (ARM_AM::getAM3Op(MI.getOperand(3).getImm()) == ARM_AM::sub) ? 3 : 2;
3498 
3499   case ARM::LDRSB_POST:
3500   case ARM::LDRSH_POST: {
3501     Register Rt = MI.getOperand(0).getReg();
3502     Register Rm = MI.getOperand(3).getReg();
3503     return (Rt == Rm) ? 4 : 3;
3504   }
3505 
3506   case ARM::LDR_PRE_REG:
3507   case ARM::LDRB_PRE_REG: {
3508     Register Rt = MI.getOperand(0).getReg();
3509     Register Rm = MI.getOperand(3).getReg();
3510     if (Rt == Rm)
3511       return 3;
3512     unsigned ShOpVal = MI.getOperand(4).getImm();
3513     bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3514     unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3515     if (!isSub &&
3516         (ShImm == 0 ||
3517          ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3518           ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3519       return 2;
3520     return 3;
3521   }
3522 
3523   case ARM::STR_PRE_REG:
3524   case ARM::STRB_PRE_REG: {
3525     unsigned ShOpVal = MI.getOperand(4).getImm();
3526     bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3527     unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3528     if (!isSub &&
3529         (ShImm == 0 ||
3530          ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3531           ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3532       return 2;
3533     return 3;
3534   }
3535 
3536   case ARM::LDRH_PRE:
3537   case ARM::STRH_PRE: {
3538     Register Rt = MI.getOperand(0).getReg();
3539     Register Rm = MI.getOperand(3).getReg();
3540     if (!Rm)
3541       return 2;
3542     if (Rt == Rm)
3543       return 3;
3544     return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 3 : 2;
3545   }
3546 
3547   case ARM::LDR_POST_REG:
3548   case ARM::LDRB_POST_REG:
3549   case ARM::LDRH_POST: {
3550     Register Rt = MI.getOperand(0).getReg();
3551     Register Rm = MI.getOperand(3).getReg();
3552     return (Rt == Rm) ? 3 : 2;
3553   }
3554 
3555   case ARM::LDR_PRE_IMM:
3556   case ARM::LDRB_PRE_IMM:
3557   case ARM::LDR_POST_IMM:
3558   case ARM::LDRB_POST_IMM:
3559   case ARM::STRB_POST_IMM:
3560   case ARM::STRB_POST_REG:
3561   case ARM::STRB_PRE_IMM:
3562   case ARM::STRH_POST:
3563   case ARM::STR_POST_IMM:
3564   case ARM::STR_POST_REG:
3565   case ARM::STR_PRE_IMM:
3566     return 2;
3567 
3568   case ARM::LDRSB_PRE:
3569   case ARM::LDRSH_PRE: {
3570     Register Rm = MI.getOperand(3).getReg();
3571     if (Rm == 0)
3572       return 3;
3573     Register Rt = MI.getOperand(0).getReg();
3574     if (Rt == Rm)
3575       return 4;
3576     unsigned ShOpVal = MI.getOperand(4).getImm();
3577     bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3578     unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3579     if (!isSub &&
3580         (ShImm == 0 ||
3581          ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3582           ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3583       return 3;
3584     return 4;
3585   }
3586 
3587   case ARM::LDRD: {
3588     Register Rt = MI.getOperand(0).getReg();
3589     Register Rn = MI.getOperand(2).getReg();
3590     Register Rm = MI.getOperand(3).getReg();
3591     if (Rm)
3592       return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4
3593                                                                           : 3;
3594     return (Rt == Rn) ? 3 : 2;
3595   }
3596 
3597   case ARM::STRD: {
3598     Register Rm = MI.getOperand(3).getReg();
3599     if (Rm)
3600       return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4
3601                                                                           : 3;
3602     return 2;
3603   }
3604 
3605   case ARM::LDRD_POST:
3606   case ARM::t2LDRD_POST:
3607     return 3;
3608 
3609   case ARM::STRD_POST:
3610   case ARM::t2STRD_POST:
3611     return 4;
3612 
3613   case ARM::LDRD_PRE: {
3614     Register Rt = MI.getOperand(0).getReg();
3615     Register Rn = MI.getOperand(3).getReg();
3616     Register Rm = MI.getOperand(4).getReg();
3617     if (Rm)
3618       return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5
3619                                                                           : 4;
3620     return (Rt == Rn) ? 4 : 3;
3621   }
3622 
3623   case ARM::t2LDRD_PRE: {
3624     Register Rt = MI.getOperand(0).getReg();
3625     Register Rn = MI.getOperand(3).getReg();
3626     return (Rt == Rn) ? 4 : 3;
3627   }
3628 
3629   case ARM::STRD_PRE: {
3630     Register Rm = MI.getOperand(4).getReg();
3631     if (Rm)
3632       return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5
3633                                                                           : 4;
3634     return 3;
3635   }
3636 
3637   case ARM::t2STRD_PRE:
3638     return 3;
3639 
3640   case ARM::t2LDR_POST:
3641   case ARM::t2LDRB_POST:
3642   case ARM::t2LDRB_PRE:
3643   case ARM::t2LDRSBi12:
3644   case ARM::t2LDRSBi8:
3645   case ARM::t2LDRSBpci:
3646   case ARM::t2LDRSBs:
3647   case ARM::t2LDRH_POST:
3648   case ARM::t2LDRH_PRE:
3649   case ARM::t2LDRSBT:
3650   case ARM::t2LDRSB_POST:
3651   case ARM::t2LDRSB_PRE:
3652   case ARM::t2LDRSH_POST:
3653   case ARM::t2LDRSH_PRE:
3654   case ARM::t2LDRSHi12:
3655   case ARM::t2LDRSHi8:
3656   case ARM::t2LDRSHpci:
3657   case ARM::t2LDRSHs:
3658     return 2;
3659 
3660   case ARM::t2LDRDi8: {
3661     Register Rt = MI.getOperand(0).getReg();
3662     Register Rn = MI.getOperand(2).getReg();
3663     return (Rt == Rn) ? 3 : 2;
3664   }
3665 
3666   case ARM::t2STRB_POST:
3667   case ARM::t2STRB_PRE:
3668   case ARM::t2STRBs:
3669   case ARM::t2STRDi8:
3670   case ARM::t2STRH_POST:
3671   case ARM::t2STRH_PRE:
3672   case ARM::t2STRHs:
3673   case ARM::t2STR_POST:
3674   case ARM::t2STR_PRE:
3675   case ARM::t2STRs:
3676     return 2;
3677   }
3678 }
3679 
3680 // Return the number of 32-bit words loaded by LDM or stored by STM. If this
3681 // can't be easily determined return 0 (missing MachineMemOperand).
3682 //
3683 // FIXME: The current MachineInstr design does not support relying on machine
3684 // mem operands to determine the width of a memory access. Instead, we expect
3685 // the target to provide this information based on the instruction opcode and
3686 // operands. However, using MachineMemOperand is the best solution now for
3687 // two reasons:
3688 //
3689 // 1) getNumMicroOps tries to infer LDM memory width from the total number of MI
3690 // operands. This is much more dangerous than using the MachineMemOperand
3691 // sizes because CodeGen passes can insert/remove optional machine operands. In
3692 // fact, it's totally incorrect for preRA passes and appears to be wrong for
3693 // postRA passes as well.
3694 //
3695 // 2) getNumLDMAddresses is only used by the scheduling machine model and any
3696 // machine model that calls this should handle the unknown (zero size) case.
3697 //
3698 // Long term, we should require a target hook that verifies MachineMemOperand
3699 // sizes during MC lowering. That target hook should be local to MC lowering
3700 // because we can't ensure that it is aware of other MI forms. Doing this will
3701 // ensure that MachineMemOperands are correctly propagated through all passes.
3702 unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr &MI) const {
3703   unsigned Size = 0;
3704   for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
3705                                   E = MI.memoperands_end();
3706        I != E; ++I) {
3707     Size += (*I)->getSize();
3708   }
3709   // FIXME: The scheduler currently can't handle values larger than 16. But
3710   // the values can actually go up to 32 for floating-point load/store
3711   // multiple (VLDMIA etc.). Also, the way this code is reasoning about memory
3712   // operations isn't right; we could end up with "extra" memory operands for
3713   // various reasons, like tail merge merging two memory operations.
3714   return std::min(Size / 4, 16U);
3715 }
3716 
3717 static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc,
3718                                                     unsigned NumRegs) {
3719   unsigned UOps = 1 + NumRegs; // 1 for address computation.
3720   switch (Opc) {
3721   default:
3722     break;
3723   case ARM::VLDMDIA_UPD:
3724   case ARM::VLDMDDB_UPD:
3725   case ARM::VLDMSIA_UPD:
3726   case ARM::VLDMSDB_UPD:
3727   case ARM::VSTMDIA_UPD:
3728   case ARM::VSTMDDB_UPD:
3729   case ARM::VSTMSIA_UPD:
3730   case ARM::VSTMSDB_UPD:
3731   case ARM::LDMIA_UPD:
3732   case ARM::LDMDA_UPD:
3733   case ARM::LDMDB_UPD:
3734   case ARM::LDMIB_UPD:
3735   case ARM::STMIA_UPD:
3736   case ARM::STMDA_UPD:
3737   case ARM::STMDB_UPD:
3738   case ARM::STMIB_UPD:
3739   case ARM::tLDMIA_UPD:
3740   case ARM::tSTMIA_UPD:
3741   case ARM::t2LDMIA_UPD:
3742   case ARM::t2LDMDB_UPD:
3743   case ARM::t2STMIA_UPD:
3744   case ARM::t2STMDB_UPD:
3745     ++UOps; // One for base register writeback.
3746     break;
3747   case ARM::LDMIA_RET:
3748   case ARM::tPOP_RET:
3749   case ARM::t2LDMIA_RET:
3750     UOps += 2; // One for base reg wb, one for write to pc.
3751     break;
3752   }
3753   return UOps;
3754 }
3755 
3756 unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
3757                                           const MachineInstr &MI) const {
3758   if (!ItinData || ItinData->isEmpty())
3759     return 1;
3760 
3761   const MCInstrDesc &Desc = MI.getDesc();
3762   unsigned Class = Desc.getSchedClass();
3763   int ItinUOps = ItinData->getNumMicroOps(Class);
3764   if (ItinUOps >= 0) {
3765     if (Subtarget.isSwift() && (Desc.mayLoad() || Desc.mayStore()))
3766       return getNumMicroOpsSwiftLdSt(ItinData, MI);
3767 
3768     return ItinUOps;
3769   }
3770 
3771   unsigned Opc = MI.getOpcode();
3772   switch (Opc) {
3773   default:
3774     llvm_unreachable("Unexpected multi-uops instruction!");
3775   case ARM::VLDMQIA:
3776   case ARM::VSTMQIA:
3777     return 2;
3778 
3779   // The number of uOps for load / store multiple are determined by the number
3780   // registers.
3781   //
3782   // On Cortex-A8, each pair of register loads / stores can be scheduled on the
3783   // same cycle. The scheduling for the first load / store must be done
3784   // separately by assuming the address is not 64-bit aligned.
3785   //
3786   // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
3787   // is not 64-bit aligned, then AGU would take an extra cycle.  For VFP / NEON
3788   // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
3789   case ARM::VLDMDIA:
3790   case ARM::VLDMDIA_UPD:
3791   case ARM::VLDMDDB_UPD:
3792   case ARM::VLDMSIA:
3793   case ARM::VLDMSIA_UPD:
3794   case ARM::VLDMSDB_UPD:
3795   case ARM::VSTMDIA:
3796   case ARM::VSTMDIA_UPD:
3797   case ARM::VSTMDDB_UPD:
3798   case ARM::VSTMSIA:
3799   case ARM::VSTMSIA_UPD:
3800   case ARM::VSTMSDB_UPD: {
3801     unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands();
3802     return (NumRegs / 2) + (NumRegs % 2) + 1;
3803   }
3804 
3805   case ARM::LDMIA_RET:
3806   case ARM::LDMIA:
3807   case ARM::LDMDA:
3808   case ARM::LDMDB:
3809   case ARM::LDMIB:
3810   case ARM::LDMIA_UPD:
3811   case ARM::LDMDA_UPD:
3812   case ARM::LDMDB_UPD:
3813   case ARM::LDMIB_UPD:
3814   case ARM::STMIA:
3815   case ARM::STMDA:
3816   case ARM::STMDB:
3817   case ARM::STMIB:
3818   case ARM::STMIA_UPD:
3819   case ARM::STMDA_UPD:
3820   case ARM::STMDB_UPD:
3821   case ARM::STMIB_UPD:
3822   case ARM::tLDMIA:
3823   case ARM::tLDMIA_UPD:
3824   case ARM::tSTMIA_UPD:
3825   case ARM::tPOP_RET:
3826   case ARM::tPOP:
3827   case ARM::tPUSH:
3828   case ARM::t2LDMIA_RET:
3829   case ARM::t2LDMIA:
3830   case ARM::t2LDMDB:
3831   case ARM::t2LDMIA_UPD:
3832   case ARM::t2LDMDB_UPD:
3833   case ARM::t2STMIA:
3834   case ARM::t2STMDB:
3835   case ARM::t2STMIA_UPD:
3836   case ARM::t2STMDB_UPD: {
3837     unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands() + 1;
3838     switch (Subtarget.getLdStMultipleTiming()) {
3839     case ARMSubtarget::SingleIssuePlusExtras:
3840       return getNumMicroOpsSingleIssuePlusExtras(Opc, NumRegs);
3841     case ARMSubtarget::SingleIssue:
3842       // Assume the worst.
3843       return NumRegs;
3844     case ARMSubtarget::DoubleIssue: {
3845       if (NumRegs < 4)
3846         return 2;
3847       // 4 registers would be issued: 2, 2.
3848       // 5 registers would be issued: 2, 2, 1.
3849       unsigned UOps = (NumRegs / 2);
3850       if (NumRegs % 2)
3851         ++UOps;
3852       return UOps;
3853     }
3854     case ARMSubtarget::DoubleIssueCheckUnalignedAccess: {
3855       unsigned UOps = (NumRegs / 2);
3856       // If there are odd number of registers or if it's not 64-bit aligned,
3857       // then it takes an extra AGU (Address Generation Unit) cycle.
3858       if ((NumRegs % 2) || !MI.hasOneMemOperand() ||
3859           (*MI.memoperands_begin())->getAlign() < Align(8))
3860         ++UOps;
3861       return UOps;
3862       }
3863     }
3864   }
3865   }
3866   llvm_unreachable("Didn't find the number of microops");
3867 }
3868 
3869 int
3870 ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
3871                                   const MCInstrDesc &DefMCID,
3872                                   unsigned DefClass,
3873                                   unsigned DefIdx, unsigned DefAlign) const {
3874   int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3875   if (RegNo <= 0)
3876     // Def is the address writeback.
3877     return ItinData->getOperandCycle(DefClass, DefIdx);
3878 
3879   int DefCycle;
3880   if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3881     // (regno / 2) + (regno % 2) + 1
3882     DefCycle = RegNo / 2 + 1;
3883     if (RegNo % 2)
3884       ++DefCycle;
3885   } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3886     DefCycle = RegNo;
3887     bool isSLoad = false;
3888 
3889     switch (DefMCID.getOpcode()) {
3890     default: break;
3891     case ARM::VLDMSIA:
3892     case ARM::VLDMSIA_UPD:
3893     case ARM::VLDMSDB_UPD:
3894       isSLoad = true;
3895       break;
3896     }
3897 
3898     // If there are odd number of 'S' registers or if it's not 64-bit aligned,
3899     // then it takes an extra cycle.
3900     if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3901       ++DefCycle;
3902   } else {
3903     // Assume the worst.
3904     DefCycle = RegNo + 2;
3905   }
3906 
3907   return DefCycle;
3908 }
3909 
3910 int
3911 ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
3912                                  const MCInstrDesc &DefMCID,
3913                                  unsigned DefClass,
3914                                  unsigned DefIdx, unsigned DefAlign) const {
3915   int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3916   if (RegNo <= 0)
3917     // Def is the address writeback.
3918     return ItinData->getOperandCycle(DefClass, DefIdx);
3919 
3920   int DefCycle;
3921   if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3922     // 4 registers would be issued: 1, 2, 1.
3923     // 5 registers would be issued: 1, 2, 2.
3924     DefCycle = RegNo / 2;
3925     if (DefCycle < 1)
3926       DefCycle = 1;
3927     // Result latency is issue cycle + 2: E2.
3928     DefCycle += 2;
3929   } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3930     DefCycle = (RegNo / 2);
3931     // If there are odd number of registers or if it's not 64-bit aligned,
3932     // then it takes an extra AGU (Address Generation Unit) cycle.
3933     if ((RegNo % 2) || DefAlign < 8)
3934       ++DefCycle;
3935     // Result latency is AGU cycles + 2.
3936     DefCycle += 2;
3937   } else {
3938     // Assume the worst.
3939     DefCycle = RegNo + 2;
3940   }
3941 
3942   return DefCycle;
3943 }
3944 
3945 int
3946 ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
3947                                   const MCInstrDesc &UseMCID,
3948                                   unsigned UseClass,
3949                                   unsigned UseIdx, unsigned UseAlign) const {
3950   int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3951   if (RegNo <= 0)
3952     return ItinData->getOperandCycle(UseClass, UseIdx);
3953 
3954   int UseCycle;
3955   if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3956     // (regno / 2) + (regno % 2) + 1
3957     UseCycle = RegNo / 2 + 1;
3958     if (RegNo % 2)
3959       ++UseCycle;
3960   } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3961     UseCycle = RegNo;
3962     bool isSStore = false;
3963 
3964     switch (UseMCID.getOpcode()) {
3965     default: break;
3966     case ARM::VSTMSIA:
3967     case ARM::VSTMSIA_UPD:
3968     case ARM::VSTMSDB_UPD:
3969       isSStore = true;
3970       break;
3971     }
3972 
3973     // If there are odd number of 'S' registers or if it's not 64-bit aligned,
3974     // then it takes an extra cycle.
3975     if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3976       ++UseCycle;
3977   } else {
3978     // Assume the worst.
3979     UseCycle = RegNo + 2;
3980   }
3981 
3982   return UseCycle;
3983 }
3984 
3985 int
3986 ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
3987                                  const MCInstrDesc &UseMCID,
3988                                  unsigned UseClass,
3989                                  unsigned UseIdx, unsigned UseAlign) const {
3990   int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3991   if (RegNo <= 0)
3992     return ItinData->getOperandCycle(UseClass, UseIdx);
3993 
3994   int UseCycle;
3995   if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3996     UseCycle = RegNo / 2;
3997     if (UseCycle < 2)
3998       UseCycle = 2;
3999     // Read in E3.
4000     UseCycle += 2;
4001   } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
4002     UseCycle = (RegNo / 2);
4003     // If there are odd number of registers or if it's not 64-bit aligned,
4004     // then it takes an extra AGU (Address Generation Unit) cycle.
4005     if ((RegNo % 2) || UseAlign < 8)
4006       ++UseCycle;
4007   } else {
4008     // Assume the worst.
4009     UseCycle = 1;
4010   }
4011   return UseCycle;
4012 }
4013 
4014 int
4015 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4016                                     const MCInstrDesc &DefMCID,
4017                                     unsigned DefIdx, unsigned DefAlign,
4018                                     const MCInstrDesc &UseMCID,
4019                                     unsigned UseIdx, unsigned UseAlign) const {
4020   unsigned DefClass = DefMCID.getSchedClass();
4021   unsigned UseClass = UseMCID.getSchedClass();
4022 
4023   if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
4024     return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
4025 
4026   // This may be a def / use of a variable_ops instruction, the operand
4027   // latency might be determinable dynamically. Let the target try to
4028   // figure it out.
4029   int DefCycle = -1;
4030   bool LdmBypass = false;
4031   switch (DefMCID.getOpcode()) {
4032   default:
4033     DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
4034     break;
4035 
4036   case ARM::VLDMDIA:
4037   case ARM::VLDMDIA_UPD:
4038   case ARM::VLDMDDB_UPD:
4039   case ARM::VLDMSIA:
4040   case ARM::VLDMSIA_UPD:
4041   case ARM::VLDMSDB_UPD:
4042     DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4043     break;
4044 
4045   case ARM::LDMIA_RET:
4046   case ARM::LDMIA:
4047   case ARM::LDMDA:
4048   case ARM::LDMDB:
4049   case ARM::LDMIB:
4050   case ARM::LDMIA_UPD:
4051   case ARM::LDMDA_UPD:
4052   case ARM::LDMDB_UPD:
4053   case ARM::LDMIB_UPD:
4054   case ARM::tLDMIA:
4055   case ARM::tLDMIA_UPD:
4056   case ARM::tPUSH:
4057   case ARM::t2LDMIA_RET:
4058   case ARM::t2LDMIA:
4059   case ARM::t2LDMDB:
4060   case ARM::t2LDMIA_UPD:
4061   case ARM::t2LDMDB_UPD:
4062     LdmBypass = true;
4063     DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4064     break;
4065   }
4066 
4067   if (DefCycle == -1)
4068     // We can't seem to determine the result latency of the def, assume it's 2.
4069     DefCycle = 2;
4070 
4071   int UseCycle = -1;
4072   switch (UseMCID.getOpcode()) {
4073   default:
4074     UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
4075     break;
4076 
4077   case ARM::VSTMDIA:
4078   case ARM::VSTMDIA_UPD:
4079   case ARM::VSTMDDB_UPD:
4080   case ARM::VSTMSIA:
4081   case ARM::VSTMSIA_UPD:
4082   case ARM::VSTMSDB_UPD:
4083     UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4084     break;
4085 
4086   case ARM::STMIA:
4087   case ARM::STMDA:
4088   case ARM::STMDB:
4089   case ARM::STMIB:
4090   case ARM::STMIA_UPD:
4091   case ARM::STMDA_UPD:
4092   case ARM::STMDB_UPD:
4093   case ARM::STMIB_UPD:
4094   case ARM::tSTMIA_UPD:
4095   case ARM::tPOP_RET:
4096   case ARM::tPOP:
4097   case ARM::t2STMIA:
4098   case ARM::t2STMDB:
4099   case ARM::t2STMIA_UPD:
4100   case ARM::t2STMDB_UPD:
4101     UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4102     break;
4103   }
4104 
4105   if (UseCycle == -1)
4106     // Assume it's read in the first stage.
4107     UseCycle = 1;
4108 
4109   UseCycle = DefCycle - UseCycle + 1;
4110   if (UseCycle > 0) {
4111     if (LdmBypass) {
4112       // It's a variable_ops instruction so we can't use DefIdx here. Just use
4113       // first def operand.
4114       if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1,
4115                                           UseClass, UseIdx))
4116         --UseCycle;
4117     } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
4118                                                UseClass, UseIdx)) {
4119       --UseCycle;
4120     }
4121   }
4122 
4123   return UseCycle;
4124 }
4125 
4126 static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
4127                                            const MachineInstr *MI, unsigned Reg,
4128                                            unsigned &DefIdx, unsigned &Dist) {
4129   Dist = 0;
4130 
4131   MachineBasicBlock::const_iterator I = MI; ++I;
4132   MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator());
4133   assert(II->isInsideBundle() && "Empty bundle?");
4134 
4135   int Idx = -1;
4136   while (II->isInsideBundle()) {
4137     Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI);
4138     if (Idx != -1)
4139       break;
4140     --II;
4141     ++Dist;
4142   }
4143 
4144   assert(Idx != -1 && "Cannot find bundled definition!");
4145   DefIdx = Idx;
4146   return &*II;
4147 }
4148 
4149 static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
4150                                            const MachineInstr &MI, unsigned Reg,
4151                                            unsigned &UseIdx, unsigned &Dist) {
4152   Dist = 0;
4153 
4154   MachineBasicBlock::const_instr_iterator II = ++MI.getIterator();
4155   assert(II->isInsideBundle() && "Empty bundle?");
4156   MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
4157 
4158   // FIXME: This doesn't properly handle multiple uses.
4159   int Idx = -1;
4160   while (II != E && II->isInsideBundle()) {
4161     Idx = II->findRegisterUseOperandIdx(Reg, false, TRI);
4162     if (Idx != -1)
4163       break;
4164     if (II->getOpcode() != ARM::t2IT)
4165       ++Dist;
4166     ++II;
4167   }
4168 
4169   if (Idx == -1) {
4170     Dist = 0;
4171     return nullptr;
4172   }
4173 
4174   UseIdx = Idx;
4175   return &*II;
4176 }
4177 
4178 /// Return the number of cycles to add to (or subtract from) the static
4179 /// itinerary based on the def opcode and alignment. The caller will ensure that
4180 /// adjusted latency is at least one cycle.
4181 static int adjustDefLatency(const ARMSubtarget &Subtarget,
4182                             const MachineInstr &DefMI,
4183                             const MCInstrDesc &DefMCID, unsigned DefAlign) {
4184   int Adjust = 0;
4185   if (Subtarget.isCortexA8() || Subtarget.isLikeA9() || Subtarget.isCortexA7()) {
4186     // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
4187     // variants are one cycle cheaper.
4188     switch (DefMCID.getOpcode()) {
4189     default: break;
4190     case ARM::LDRrs:
4191     case ARM::LDRBrs: {
4192       unsigned ShOpVal = DefMI.getOperand(3).getImm();
4193       unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4194       if (ShImm == 0 ||
4195           (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
4196         --Adjust;
4197       break;
4198     }
4199     case ARM::t2LDRs:
4200     case ARM::t2LDRBs:
4201     case ARM::t2LDRHs:
4202     case ARM::t2LDRSHs: {
4203       // Thumb2 mode: lsl only.
4204       unsigned ShAmt = DefMI.getOperand(3).getImm();
4205       if (ShAmt == 0 || ShAmt == 2)
4206         --Adjust;
4207       break;
4208     }
4209     }
4210   } else if (Subtarget.isSwift()) {
4211     // FIXME: Properly handle all of the latency adjustments for address
4212     // writeback.
4213     switch (DefMCID.getOpcode()) {
4214     default: break;
4215     case ARM::LDRrs:
4216     case ARM::LDRBrs: {
4217       unsigned ShOpVal = DefMI.getOperand(3).getImm();
4218       bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
4219       unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4220       if (!isSub &&
4221           (ShImm == 0 ||
4222            ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4223             ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
4224         Adjust -= 2;
4225       else if (!isSub &&
4226                ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr)
4227         --Adjust;
4228       break;
4229     }
4230     case ARM::t2LDRs:
4231     case ARM::t2LDRBs:
4232     case ARM::t2LDRHs:
4233     case ARM::t2LDRSHs: {
4234       // Thumb2 mode: lsl only.
4235       unsigned ShAmt = DefMI.getOperand(3).getImm();
4236       if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4237         Adjust -= 2;
4238       break;
4239     }
4240     }
4241   }
4242 
4243   if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4244     switch (DefMCID.getOpcode()) {
4245     default: break;
4246     case ARM::VLD1q8:
4247     case ARM::VLD1q16:
4248     case ARM::VLD1q32:
4249     case ARM::VLD1q64:
4250     case ARM::VLD1q8wb_fixed:
4251     case ARM::VLD1q16wb_fixed:
4252     case ARM::VLD1q32wb_fixed:
4253     case ARM::VLD1q64wb_fixed:
4254     case ARM::VLD1q8wb_register:
4255     case ARM::VLD1q16wb_register:
4256     case ARM::VLD1q32wb_register:
4257     case ARM::VLD1q64wb_register:
4258     case ARM::VLD2d8:
4259     case ARM::VLD2d16:
4260     case ARM::VLD2d32:
4261     case ARM::VLD2q8:
4262     case ARM::VLD2q16:
4263     case ARM::VLD2q32:
4264     case ARM::VLD2d8wb_fixed:
4265     case ARM::VLD2d16wb_fixed:
4266     case ARM::VLD2d32wb_fixed:
4267     case ARM::VLD2q8wb_fixed:
4268     case ARM::VLD2q16wb_fixed:
4269     case ARM::VLD2q32wb_fixed:
4270     case ARM::VLD2d8wb_register:
4271     case ARM::VLD2d16wb_register:
4272     case ARM::VLD2d32wb_register:
4273     case ARM::VLD2q8wb_register:
4274     case ARM::VLD2q16wb_register:
4275     case ARM::VLD2q32wb_register:
4276     case ARM::VLD3d8:
4277     case ARM::VLD3d16:
4278     case ARM::VLD3d32:
4279     case ARM::VLD1d64T:
4280     case ARM::VLD3d8_UPD:
4281     case ARM::VLD3d16_UPD:
4282     case ARM::VLD3d32_UPD:
4283     case ARM::VLD1d64Twb_fixed:
4284     case ARM::VLD1d64Twb_register:
4285     case ARM::VLD3q8_UPD:
4286     case ARM::VLD3q16_UPD:
4287     case ARM::VLD3q32_UPD:
4288     case ARM::VLD4d8:
4289     case ARM::VLD4d16:
4290     case ARM::VLD4d32:
4291     case ARM::VLD1d64Q:
4292     case ARM::VLD4d8_UPD:
4293     case ARM::VLD4d16_UPD:
4294     case ARM::VLD4d32_UPD:
4295     case ARM::VLD1d64Qwb_fixed:
4296     case ARM::VLD1d64Qwb_register:
4297     case ARM::VLD4q8_UPD:
4298     case ARM::VLD4q16_UPD:
4299     case ARM::VLD4q32_UPD:
4300     case ARM::VLD1DUPq8:
4301     case ARM::VLD1DUPq16:
4302     case ARM::VLD1DUPq32:
4303     case ARM::VLD1DUPq8wb_fixed:
4304     case ARM::VLD1DUPq16wb_fixed:
4305     case ARM::VLD1DUPq32wb_fixed:
4306     case ARM::VLD1DUPq8wb_register:
4307     case ARM::VLD1DUPq16wb_register:
4308     case ARM::VLD1DUPq32wb_register:
4309     case ARM::VLD2DUPd8:
4310     case ARM::VLD2DUPd16:
4311     case ARM::VLD2DUPd32:
4312     case ARM::VLD2DUPd8wb_fixed:
4313     case ARM::VLD2DUPd16wb_fixed:
4314     case ARM::VLD2DUPd32wb_fixed:
4315     case ARM::VLD2DUPd8wb_register:
4316     case ARM::VLD2DUPd16wb_register:
4317     case ARM::VLD2DUPd32wb_register:
4318     case ARM::VLD4DUPd8:
4319     case ARM::VLD4DUPd16:
4320     case ARM::VLD4DUPd32:
4321     case ARM::VLD4DUPd8_UPD:
4322     case ARM::VLD4DUPd16_UPD:
4323     case ARM::VLD4DUPd32_UPD:
4324     case ARM::VLD1LNd8:
4325     case ARM::VLD1LNd16:
4326     case ARM::VLD1LNd32:
4327     case ARM::VLD1LNd8_UPD:
4328     case ARM::VLD1LNd16_UPD:
4329     case ARM::VLD1LNd32_UPD:
4330     case ARM::VLD2LNd8:
4331     case ARM::VLD2LNd16:
4332     case ARM::VLD2LNd32:
4333     case ARM::VLD2LNq16:
4334     case ARM::VLD2LNq32:
4335     case ARM::VLD2LNd8_UPD:
4336     case ARM::VLD2LNd16_UPD:
4337     case ARM::VLD2LNd32_UPD:
4338     case ARM::VLD2LNq16_UPD:
4339     case ARM::VLD2LNq32_UPD:
4340     case ARM::VLD4LNd8:
4341     case ARM::VLD4LNd16:
4342     case ARM::VLD4LNd32:
4343     case ARM::VLD4LNq16:
4344     case ARM::VLD4LNq32:
4345     case ARM::VLD4LNd8_UPD:
4346     case ARM::VLD4LNd16_UPD:
4347     case ARM::VLD4LNd32_UPD:
4348     case ARM::VLD4LNq16_UPD:
4349     case ARM::VLD4LNq32_UPD:
4350       // If the address is not 64-bit aligned, the latencies of these
4351       // instructions increases by one.
4352       ++Adjust;
4353       break;
4354     }
4355   }
4356   return Adjust;
4357 }
4358 
4359 int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4360                                         const MachineInstr &DefMI,
4361                                         unsigned DefIdx,
4362                                         const MachineInstr &UseMI,
4363                                         unsigned UseIdx) const {
4364   // No operand latency. The caller may fall back to getInstrLatency.
4365   if (!ItinData || ItinData->isEmpty())
4366     return -1;
4367 
4368   const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
4369   Register Reg = DefMO.getReg();
4370 
4371   const MachineInstr *ResolvedDefMI = &DefMI;
4372   unsigned DefAdj = 0;
4373   if (DefMI.isBundle())
4374     ResolvedDefMI =
4375         getBundledDefMI(&getRegisterInfo(), &DefMI, Reg, DefIdx, DefAdj);
4376   if (ResolvedDefMI->isCopyLike() || ResolvedDefMI->isInsertSubreg() ||
4377       ResolvedDefMI->isRegSequence() || ResolvedDefMI->isImplicitDef()) {
4378     return 1;
4379   }
4380 
4381   const MachineInstr *ResolvedUseMI = &UseMI;
4382   unsigned UseAdj = 0;
4383   if (UseMI.isBundle()) {
4384     ResolvedUseMI =
4385         getBundledUseMI(&getRegisterInfo(), UseMI, Reg, UseIdx, UseAdj);
4386     if (!ResolvedUseMI)
4387       return -1;
4388   }
4389 
4390   return getOperandLatencyImpl(
4391       ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->getDesc(), DefAdj, DefMO,
4392       Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->getDesc(), UseAdj);
4393 }
4394 
4395 int ARMBaseInstrInfo::getOperandLatencyImpl(
4396     const InstrItineraryData *ItinData, const MachineInstr &DefMI,
4397     unsigned DefIdx, const MCInstrDesc &DefMCID, unsigned DefAdj,
4398     const MachineOperand &DefMO, unsigned Reg, const MachineInstr &UseMI,
4399     unsigned UseIdx, const MCInstrDesc &UseMCID, unsigned UseAdj) const {
4400   if (Reg == ARM::CPSR) {
4401     if (DefMI.getOpcode() == ARM::FMSTAT) {
4402       // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
4403       return Subtarget.isLikeA9() ? 1 : 20;
4404     }
4405 
4406     // CPSR set and branch can be paired in the same cycle.
4407     if (UseMI.isBranch())
4408       return 0;
4409 
4410     // Otherwise it takes the instruction latency (generally one).
4411     unsigned Latency = getInstrLatency(ItinData, DefMI);
4412 
4413     // For Thumb2 and -Os, prefer scheduling CPSR setting instruction close to
4414     // its uses. Instructions which are otherwise scheduled between them may
4415     // incur a code size penalty (not able to use the CPSR setting 16-bit
4416     // instructions).
4417     if (Latency > 0 && Subtarget.isThumb2()) {
4418       const MachineFunction *MF = DefMI.getParent()->getParent();
4419       // FIXME: Use Function::hasOptSize().
4420       if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize))
4421         --Latency;
4422     }
4423     return Latency;
4424   }
4425 
4426   if (DefMO.isImplicit() || UseMI.getOperand(UseIdx).isImplicit())
4427     return -1;
4428 
4429   unsigned DefAlign = DefMI.hasOneMemOperand()
4430                           ? (*DefMI.memoperands_begin())->getAlign().value()
4431                           : 0;
4432   unsigned UseAlign = UseMI.hasOneMemOperand()
4433                           ? (*UseMI.memoperands_begin())->getAlign().value()
4434                           : 0;
4435 
4436   // Get the itinerary's latency if possible, and handle variable_ops.
4437   int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, UseMCID,
4438                                   UseIdx, UseAlign);
4439   // Unable to find operand latency. The caller may resort to getInstrLatency.
4440   if (Latency < 0)
4441     return Latency;
4442 
4443   // Adjust for IT block position.
4444   int Adj = DefAdj + UseAdj;
4445 
4446   // Adjust for dynamic def-side opcode variants not captured by the itinerary.
4447   Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign);
4448   if (Adj >= 0 || (int)Latency > -Adj) {
4449     return Latency + Adj;
4450   }
4451   // Return the itinerary latency, which may be zero but not less than zero.
4452   return Latency;
4453 }
4454 
4455 int
4456 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4457                                     SDNode *DefNode, unsigned DefIdx,
4458                                     SDNode *UseNode, unsigned UseIdx) const {
4459   if (!DefNode->isMachineOpcode())
4460     return 1;
4461 
4462   const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());
4463 
4464   if (isZeroCost(DefMCID.Opcode))
4465     return 0;
4466 
4467   if (!ItinData || ItinData->isEmpty())
4468     return DefMCID.mayLoad() ? 3 : 1;
4469 
4470   if (!UseNode->isMachineOpcode()) {
4471     int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
4472     int Adj = Subtarget.getPreISelOperandLatencyAdjustment();
4473     int Threshold = 1 + Adj;
4474     return Latency <= Threshold ? 1 : Latency - Adj;
4475   }
4476 
4477   const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
4478   auto *DefMN = cast<MachineSDNode>(DefNode);
4479   unsigned DefAlign = !DefMN->memoperands_empty()
4480                           ? (*DefMN->memoperands_begin())->getAlign().value()
4481                           : 0;
4482   auto *UseMN = cast<MachineSDNode>(UseNode);
4483   unsigned UseAlign = !UseMN->memoperands_empty()
4484                           ? (*UseMN->memoperands_begin())->getAlign().value()
4485                           : 0;
4486   int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
4487                                   UseMCID, UseIdx, UseAlign);
4488 
4489   if (Latency > 1 &&
4490       (Subtarget.isCortexA8() || Subtarget.isLikeA9() ||
4491        Subtarget.isCortexA7())) {
4492     // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
4493     // variants are one cycle cheaper.
4494     switch (DefMCID.getOpcode()) {
4495     default: break;
4496     case ARM::LDRrs:
4497     case ARM::LDRBrs: {
4498       unsigned ShOpVal =
4499         cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
4500       unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4501       if (ShImm == 0 ||
4502           (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
4503         --Latency;
4504       break;
4505     }
4506     case ARM::t2LDRs:
4507     case ARM::t2LDRBs:
4508     case ARM::t2LDRHs:
4509     case ARM::t2LDRSHs: {
4510       // Thumb2 mode: lsl only.
4511       unsigned ShAmt =
4512         cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
4513       if (ShAmt == 0 || ShAmt == 2)
4514         --Latency;
4515       break;
4516     }
4517     }
4518   } else if (DefIdx == 0 && Latency > 2 && Subtarget.isSwift()) {
4519     // FIXME: Properly handle all of the latency adjustments for address
4520     // writeback.
4521     switch (DefMCID.getOpcode()) {
4522     default: break;
4523     case ARM::LDRrs:
4524     case ARM::LDRBrs: {
4525       unsigned ShOpVal =
4526         cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
4527       unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4528       if (ShImm == 0 ||
4529           ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4530            ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
4531         Latency -= 2;
4532       else if (ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr)
4533         --Latency;
4534       break;
4535     }
4536     case ARM::t2LDRs:
4537     case ARM::t2LDRBs:
4538     case ARM::t2LDRHs:
4539     case ARM::t2LDRSHs:
4540       // Thumb2 mode: lsl 0-3 only.
4541       Latency -= 2;
4542       break;
4543     }
4544   }
4545 
4546   if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4547     switch (DefMCID.getOpcode()) {
4548     default: break;
4549     case ARM::VLD1q8:
4550     case ARM::VLD1q16:
4551     case ARM::VLD1q32:
4552     case ARM::VLD1q64:
4553     case ARM::VLD1q8wb_register:
4554     case ARM::VLD1q16wb_register:
4555     case ARM::VLD1q32wb_register:
4556     case ARM::VLD1q64wb_register:
4557     case ARM::VLD1q8wb_fixed:
4558     case ARM::VLD1q16wb_fixed:
4559     case ARM::VLD1q32wb_fixed:
4560     case ARM::VLD1q64wb_fixed:
4561     case ARM::VLD2d8:
4562     case ARM::VLD2d16:
4563     case ARM::VLD2d32:
4564     case ARM::VLD2q8Pseudo:
4565     case ARM::VLD2q16Pseudo:
4566     case ARM::VLD2q32Pseudo:
4567     case ARM::VLD2d8wb_fixed:
4568     case ARM::VLD2d16wb_fixed:
4569     case ARM::VLD2d32wb_fixed:
4570     case ARM::VLD2q8PseudoWB_fixed:
4571     case ARM::VLD2q16PseudoWB_fixed:
4572     case ARM::VLD2q32PseudoWB_fixed:
4573     case ARM::VLD2d8wb_register:
4574     case ARM::VLD2d16wb_register:
4575     case ARM::VLD2d32wb_register:
4576     case ARM::VLD2q8PseudoWB_register:
4577     case ARM::VLD2q16PseudoWB_register:
4578     case ARM::VLD2q32PseudoWB_register:
4579     case ARM::VLD3d8Pseudo:
4580     case ARM::VLD3d16Pseudo:
4581     case ARM::VLD3d32Pseudo:
4582     case ARM::VLD1d8TPseudo:
4583     case ARM::VLD1d16TPseudo:
4584     case ARM::VLD1d32TPseudo:
4585     case ARM::VLD1d64TPseudo:
4586     case ARM::VLD1d64TPseudoWB_fixed:
4587     case ARM::VLD1d64TPseudoWB_register:
4588     case ARM::VLD3d8Pseudo_UPD:
4589     case ARM::VLD3d16Pseudo_UPD:
4590     case ARM::VLD3d32Pseudo_UPD:
4591     case ARM::VLD3q8Pseudo_UPD:
4592     case ARM::VLD3q16Pseudo_UPD:
4593     case ARM::VLD3q32Pseudo_UPD:
4594     case ARM::VLD3q8oddPseudo:
4595     case ARM::VLD3q16oddPseudo:
4596     case ARM::VLD3q32oddPseudo:
4597     case ARM::VLD3q8oddPseudo_UPD:
4598     case ARM::VLD3q16oddPseudo_UPD:
4599     case ARM::VLD3q32oddPseudo_UPD:
4600     case ARM::VLD4d8Pseudo:
4601     case ARM::VLD4d16Pseudo:
4602     case ARM::VLD4d32Pseudo:
4603     case ARM::VLD1d8QPseudo:
4604     case ARM::VLD1d16QPseudo:
4605     case ARM::VLD1d32QPseudo:
4606     case ARM::VLD1d64QPseudo:
4607     case ARM::VLD1d64QPseudoWB_fixed:
4608     case ARM::VLD1d64QPseudoWB_register:
4609     case ARM::VLD1q8HighQPseudo:
4610     case ARM::VLD1q8LowQPseudo_UPD:
4611     case ARM::VLD1q8HighTPseudo:
4612     case ARM::VLD1q8LowTPseudo_UPD:
4613     case ARM::VLD1q16HighQPseudo:
4614     case ARM::VLD1q16LowQPseudo_UPD:
4615     case ARM::VLD1q16HighTPseudo:
4616     case ARM::VLD1q16LowTPseudo_UPD:
4617     case ARM::VLD1q32HighQPseudo:
4618     case ARM::VLD1q32LowQPseudo_UPD:
4619     case ARM::VLD1q32HighTPseudo:
4620     case ARM::VLD1q32LowTPseudo_UPD:
4621     case ARM::VLD1q64HighQPseudo:
4622     case ARM::VLD1q64LowQPseudo_UPD:
4623     case ARM::VLD1q64HighTPseudo:
4624     case ARM::VLD1q64LowTPseudo_UPD:
4625     case ARM::VLD4d8Pseudo_UPD:
4626     case ARM::VLD4d16Pseudo_UPD:
4627     case ARM::VLD4d32Pseudo_UPD:
4628     case ARM::VLD4q8Pseudo_UPD:
4629     case ARM::VLD4q16Pseudo_UPD:
4630     case ARM::VLD4q32Pseudo_UPD:
4631     case ARM::VLD4q8oddPseudo:
4632     case ARM::VLD4q16oddPseudo:
4633     case ARM::VLD4q32oddPseudo:
4634     case ARM::VLD4q8oddPseudo_UPD:
4635     case ARM::VLD4q16oddPseudo_UPD:
4636     case ARM::VLD4q32oddPseudo_UPD:
4637     case ARM::VLD1DUPq8:
4638     case ARM::VLD1DUPq16:
4639     case ARM::VLD1DUPq32:
4640     case ARM::VLD1DUPq8wb_fixed:
4641     case ARM::VLD1DUPq16wb_fixed:
4642     case ARM::VLD1DUPq32wb_fixed:
4643     case ARM::VLD1DUPq8wb_register:
4644     case ARM::VLD1DUPq16wb_register:
4645     case ARM::VLD1DUPq32wb_register:
4646     case ARM::VLD2DUPd8:
4647     case ARM::VLD2DUPd16:
4648     case ARM::VLD2DUPd32:
4649     case ARM::VLD2DUPd8wb_fixed:
4650     case ARM::VLD2DUPd16wb_fixed:
4651     case ARM::VLD2DUPd32wb_fixed:
4652     case ARM::VLD2DUPd8wb_register:
4653     case ARM::VLD2DUPd16wb_register:
4654     case ARM::VLD2DUPd32wb_register:
4655     case ARM::VLD2DUPq8EvenPseudo:
4656     case ARM::VLD2DUPq8OddPseudo:
4657     case ARM::VLD2DUPq16EvenPseudo:
4658     case ARM::VLD2DUPq16OddPseudo:
4659     case ARM::VLD2DUPq32EvenPseudo:
4660     case ARM::VLD2DUPq32OddPseudo:
4661     case ARM::VLD3DUPq8EvenPseudo:
4662     case ARM::VLD3DUPq8OddPseudo:
4663     case ARM::VLD3DUPq16EvenPseudo:
4664     case ARM::VLD3DUPq16OddPseudo:
4665     case ARM::VLD3DUPq32EvenPseudo:
4666     case ARM::VLD3DUPq32OddPseudo:
4667     case ARM::VLD4DUPd8Pseudo:
4668     case ARM::VLD4DUPd16Pseudo:
4669     case ARM::VLD4DUPd32Pseudo:
4670     case ARM::VLD4DUPd8Pseudo_UPD:
4671     case ARM::VLD4DUPd16Pseudo_UPD:
4672     case ARM::VLD4DUPd32Pseudo_UPD:
4673     case ARM::VLD4DUPq8EvenPseudo:
4674     case ARM::VLD4DUPq8OddPseudo:
4675     case ARM::VLD4DUPq16EvenPseudo:
4676     case ARM::VLD4DUPq16OddPseudo:
4677     case ARM::VLD4DUPq32EvenPseudo:
4678     case ARM::VLD4DUPq32OddPseudo:
4679     case ARM::VLD1LNq8Pseudo:
4680     case ARM::VLD1LNq16Pseudo:
4681     case ARM::VLD1LNq32Pseudo:
4682     case ARM::VLD1LNq8Pseudo_UPD:
4683     case ARM::VLD1LNq16Pseudo_UPD:
4684     case ARM::VLD1LNq32Pseudo_UPD:
4685     case ARM::VLD2LNd8Pseudo:
4686     case ARM::VLD2LNd16Pseudo:
4687     case ARM::VLD2LNd32Pseudo:
4688     case ARM::VLD2LNq16Pseudo:
4689     case ARM::VLD2LNq32Pseudo:
4690     case ARM::VLD2LNd8Pseudo_UPD:
4691     case ARM::VLD2LNd16Pseudo_UPD:
4692     case ARM::VLD2LNd32Pseudo_UPD:
4693     case ARM::VLD2LNq16Pseudo_UPD:
4694     case ARM::VLD2LNq32Pseudo_UPD:
4695     case ARM::VLD4LNd8Pseudo:
4696     case ARM::VLD4LNd16Pseudo:
4697     case ARM::VLD4LNd32Pseudo:
4698     case ARM::VLD4LNq16Pseudo:
4699     case ARM::VLD4LNq32Pseudo:
4700     case ARM::VLD4LNd8Pseudo_UPD:
4701     case ARM::VLD4LNd16Pseudo_UPD:
4702     case ARM::VLD4LNd32Pseudo_UPD:
4703     case ARM::VLD4LNq16Pseudo_UPD:
4704     case ARM::VLD4LNq32Pseudo_UPD:
4705       // If the address is not 64-bit aligned, the latencies of these
4706       // instructions increases by one.
4707       ++Latency;
4708       break;
4709     }
4710 
4711   return Latency;
4712 }
4713 
4714 unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr &MI) const {
4715   if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
4716       MI.isImplicitDef())
4717     return 0;
4718 
4719   if (MI.isBundle())
4720     return 0;
4721 
4722   const MCInstrDesc &MCID = MI.getDesc();
4723 
4724   if (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4725                         !Subtarget.cheapPredicableCPSRDef())) {
4726     // When predicated, CPSR is an additional source operand for CPSR updating
4727     // instructions, this apparently increases their latencies.
4728     return 1;
4729   }
4730   return 0;
4731 }
4732 
4733 unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
4734                                            const MachineInstr &MI,
4735                                            unsigned *PredCost) const {
4736   if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
4737       MI.isImplicitDef())
4738     return 1;
4739 
4740   // An instruction scheduler typically runs on unbundled instructions, however
4741   // other passes may query the latency of a bundled instruction.
4742   if (MI.isBundle()) {
4743     unsigned Latency = 0;
4744     MachineBasicBlock::const_instr_iterator I = MI.getIterator();
4745     MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
4746     while (++I != E && I->isInsideBundle()) {
4747       if (I->getOpcode() != ARM::t2IT)
4748         Latency += getInstrLatency(ItinData, *I, PredCost);
4749     }
4750     return Latency;
4751   }
4752 
4753   const MCInstrDesc &MCID = MI.getDesc();
4754   if (PredCost && (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4755                                      !Subtarget.cheapPredicableCPSRDef()))) {
4756     // When predicated, CPSR is an additional source operand for CPSR updating
4757     // instructions, this apparently increases their latencies.
4758     *PredCost = 1;
4759   }
4760   // Be sure to call getStageLatency for an empty itinerary in case it has a
4761   // valid MinLatency property.
4762   if (!ItinData)
4763     return MI.mayLoad() ? 3 : 1;
4764 
4765   unsigned Class = MCID.getSchedClass();
4766 
4767   // For instructions with variable uops, use uops as latency.
4768   if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0)
4769     return getNumMicroOps(ItinData, MI);
4770 
4771   // For the common case, fall back on the itinerary's latency.
4772   unsigned Latency = ItinData->getStageLatency(Class);
4773 
4774   // Adjust for dynamic def-side opcode variants not captured by the itinerary.
4775   unsigned DefAlign =
4776       MI.hasOneMemOperand() ? (*MI.memoperands_begin())->getAlign().value() : 0;
4777   int Adj = adjustDefLatency(Subtarget, MI, MCID, DefAlign);
4778   if (Adj >= 0 || (int)Latency > -Adj) {
4779     return Latency + Adj;
4780   }
4781   return Latency;
4782 }
4783 
4784 int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
4785                                       SDNode *Node) const {
4786   if (!Node->isMachineOpcode())
4787     return 1;
4788 
4789   if (!ItinData || ItinData->isEmpty())
4790     return 1;
4791 
4792   unsigned Opcode = Node->getMachineOpcode();
4793   switch (Opcode) {
4794   default:
4795     return ItinData->getStageLatency(get(Opcode).getSchedClass());
4796   case ARM::VLDMQIA:
4797   case ARM::VSTMQIA:
4798     return 2;
4799   }
4800 }
4801 
4802 bool ARMBaseInstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
4803                                              const MachineRegisterInfo *MRI,
4804                                              const MachineInstr &DefMI,
4805                                              unsigned DefIdx,
4806                                              const MachineInstr &UseMI,
4807                                              unsigned UseIdx) const {
4808   unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
4809   unsigned UDomain = UseMI.getDesc().TSFlags & ARMII::DomainMask;
4810   if (Subtarget.nonpipelinedVFP() &&
4811       (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
4812     return true;
4813 
4814   // Hoist VFP / NEON instructions with 4 or higher latency.
4815   unsigned Latency =
4816       SchedModel.computeOperandLatency(&DefMI, DefIdx, &UseMI, UseIdx);
4817   if (Latency <= 3)
4818     return false;
4819   return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
4820          UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
4821 }
4822 
4823 bool ARMBaseInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
4824                                         const MachineInstr &DefMI,
4825                                         unsigned DefIdx) const {
4826   const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
4827   if (!ItinData || ItinData->isEmpty())
4828     return false;
4829 
4830   unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
4831   if (DDomain == ARMII::DomainGeneral) {
4832     unsigned DefClass = DefMI.getDesc().getSchedClass();
4833     int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
4834     return (DefCycle != -1 && DefCycle <= 2);
4835   }
4836   return false;
4837 }
4838 
4839 bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr &MI,
4840                                          StringRef &ErrInfo) const {
4841   if (convertAddSubFlagsOpcode(MI.getOpcode())) {
4842     ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
4843     return false;
4844   }
4845   if (MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4846     // Make sure we don't generate a lo-lo mov that isn't supported.
4847     if (!ARM::hGPRRegClass.contains(MI.getOperand(0).getReg()) &&
4848         !ARM::hGPRRegClass.contains(MI.getOperand(1).getReg())) {
4849       ErrInfo = "Non-flag-setting Thumb1 mov is v6-only";
4850       return false;
4851     }
4852   }
4853   if (MI.getOpcode() == ARM::tPUSH ||
4854       MI.getOpcode() == ARM::tPOP ||
4855       MI.getOpcode() == ARM::tPOP_RET) {
4856     for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), 2)) {
4857       if (MO.isImplicit() || !MO.isReg())
4858         continue;
4859       Register Reg = MO.getReg();
4860       if (Reg < ARM::R0 || Reg > ARM::R7) {
4861         if (!(MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) &&
4862             !(MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) {
4863           ErrInfo = "Unsupported register in Thumb1 push/pop";
4864           return false;
4865         }
4866       }
4867     }
4868   }
4869   if (MI.getOpcode() == ARM::MVE_VMOV_q_rr) {
4870     assert(MI.getOperand(4).isImm() && MI.getOperand(5).isImm());
4871     if ((MI.getOperand(4).getImm() != 2 && MI.getOperand(4).getImm() != 3) ||
4872         MI.getOperand(4).getImm() != MI.getOperand(5).getImm() + 2) {
4873       ErrInfo = "Incorrect array index for MVE_VMOV_q_rr";
4874       return false;
4875     }
4876   }
4877   return true;
4878 }
4879 
4880 void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
4881                                                 unsigned LoadImmOpc,
4882                                                 unsigned LoadOpc) const {
4883   assert(!Subtarget.isROPI() && !Subtarget.isRWPI() &&
4884          "ROPI/RWPI not currently supported with stack guard");
4885 
4886   MachineBasicBlock &MBB = *MI->getParent();
4887   DebugLoc DL = MI->getDebugLoc();
4888   Register Reg = MI->getOperand(0).getReg();
4889   MachineInstrBuilder MIB;
4890   unsigned int Offset = 0;
4891 
4892   if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) {
4893     assert(Subtarget.isReadTPHard() &&
4894            "TLS stack protector requires hardware TLS register");
4895 
4896     BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg)
4897         .addImm(15)
4898         .addImm(0)
4899         .addImm(13)
4900         .addImm(0)
4901         .addImm(3)
4902         .add(predOps(ARMCC::AL));
4903 
4904     Module &M = *MBB.getParent()->getFunction().getParent();
4905     Offset = M.getStackProtectorGuardOffset();
4906     if (Offset & ~0xfffU) {
4907       // The offset won't fit in the LDR's 12-bit immediate field, so emit an
4908       // extra ADD to cover the delta. This gives us a guaranteed 8 additional
4909       // bits, resulting in a range of 0 to +1 MiB for the guard offset.
4910       unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? ARM::ADDri : ARM::t2ADDri;
4911       BuildMI(MBB, MI, DL, get(AddOpc), Reg)
4912           .addReg(Reg, RegState::Kill)
4913           .addImm(Offset & ~0xfffU)
4914           .add(predOps(ARMCC::AL))
4915           .addReg(0);
4916       Offset &= 0xfffU;
4917     }
4918   } else {
4919     const GlobalValue *GV =
4920         cast<GlobalValue>((*MI->memoperands_begin())->getValue());
4921     bool IsIndirect = Subtarget.isGVIndirectSymbol(GV);
4922 
4923     unsigned TargetFlags = ARMII::MO_NO_FLAG;
4924     if (Subtarget.isTargetMachO()) {
4925       TargetFlags |= ARMII::MO_NONLAZY;
4926     } else if (Subtarget.isTargetCOFF()) {
4927       if (GV->hasDLLImportStorageClass())
4928         TargetFlags |= ARMII::MO_DLLIMPORT;
4929       else if (IsIndirect)
4930         TargetFlags |= ARMII::MO_COFFSTUB;
4931     } else if (Subtarget.isGVInGOT(GV)) {
4932       TargetFlags |= ARMII::MO_GOT;
4933     }
4934 
4935     BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg)
4936         .addGlobalAddress(GV, 0, TargetFlags);
4937 
4938     if (IsIndirect) {
4939       MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
4940       MIB.addReg(Reg, RegState::Kill).addImm(0);
4941       auto Flags = MachineMemOperand::MOLoad |
4942                    MachineMemOperand::MODereferenceable |
4943                    MachineMemOperand::MOInvariant;
4944       MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
4945           MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, Align(4));
4946       MIB.addMemOperand(MMO).add(predOps(ARMCC::AL));
4947     }
4948   }
4949 
4950   MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
4951   MIB.addReg(Reg, RegState::Kill)
4952       .addImm(Offset)
4953       .cloneMemRefs(*MI)
4954       .add(predOps(ARMCC::AL));
4955 }
4956 
4957 bool
4958 ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
4959                                      unsigned &AddSubOpc,
4960                                      bool &NegAcc, bool &HasLane) const {
4961   DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
4962   if (I == MLxEntryMap.end())
4963     return false;
4964 
4965   const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
4966   MulOpc = Entry.MulOpc;
4967   AddSubOpc = Entry.AddSubOpc;
4968   NegAcc = Entry.NegAcc;
4969   HasLane = Entry.HasLane;
4970   return true;
4971 }
4972 
4973 //===----------------------------------------------------------------------===//
4974 // Execution domains.
4975 //===----------------------------------------------------------------------===//
4976 //
4977 // Some instructions go down the NEON pipeline, some go down the VFP pipeline,
4978 // and some can go down both.  The vmov instructions go down the VFP pipeline,
4979 // but they can be changed to vorr equivalents that are executed by the NEON
4980 // pipeline.
4981 //
4982 // We use the following execution domain numbering:
4983 //
4984 enum ARMExeDomain {
4985   ExeGeneric = 0,
4986   ExeVFP = 1,
4987   ExeNEON = 2
4988 };
4989 
4990 //
4991 // Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
4992 //
4993 std::pair<uint16_t, uint16_t>
4994 ARMBaseInstrInfo::getExecutionDomain(const MachineInstr &MI) const {
4995   // If we don't have access to NEON instructions then we won't be able
4996   // to swizzle anything to the NEON domain. Check to make sure.
4997   if (Subtarget.hasNEON()) {
4998     // VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON
4999     // if they are not predicated.
5000     if (MI.getOpcode() == ARM::VMOVD && !isPredicated(MI))
5001       return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON));
5002 
5003     // CortexA9 is particularly picky about mixing the two and wants these
5004     // converted.
5005     if (Subtarget.useNEONForFPMovs() && !isPredicated(MI) &&
5006         (MI.getOpcode() == ARM::VMOVRS || MI.getOpcode() == ARM::VMOVSR ||
5007          MI.getOpcode() == ARM::VMOVS))
5008       return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON));
5009   }
5010   // No other instructions can be swizzled, so just determine their domain.
5011   unsigned Domain = MI.getDesc().TSFlags & ARMII::DomainMask;
5012 
5013   if (Domain & ARMII::DomainNEON)
5014     return std::make_pair(ExeNEON, 0);
5015 
5016   // Certain instructions can go either way on Cortex-A8.
5017   // Treat them as NEON instructions.
5018   if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8())
5019     return std::make_pair(ExeNEON, 0);
5020 
5021   if (Domain & ARMII::DomainVFP)
5022     return std::make_pair(ExeVFP, 0);
5023 
5024   return std::make_pair(ExeGeneric, 0);
5025 }
5026 
5027 static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI,
5028                                             unsigned SReg, unsigned &Lane) {
5029   unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
5030   Lane = 0;
5031 
5032   if (DReg != ARM::NoRegister)
5033    return DReg;
5034 
5035   Lane = 1;
5036   DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
5037 
5038   assert(DReg && "S-register with no D super-register?");
5039   return DReg;
5040 }
5041 
5042 /// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane,
5043 /// set ImplicitSReg to a register number that must be marked as implicit-use or
5044 /// zero if no register needs to be defined as implicit-use.
5045 ///
5046 /// If the function cannot determine if an SPR should be marked implicit use or
5047 /// not, it returns false.
5048 ///
5049 /// This function handles cases where an instruction is being modified from taking
5050 /// an SPR to a DPR[Lane]. A use of the DPR is being added, which may conflict
5051 /// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other
5052 /// lane of the DPR).
5053 ///
5054 /// If the other SPR is defined, an implicit-use of it should be added. Else,
5055 /// (including the case where the DPR itself is defined), it should not.
5056 ///
5057 static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI,
5058                                        MachineInstr &MI, unsigned DReg,
5059                                        unsigned Lane, unsigned &ImplicitSReg) {
5060   // If the DPR is defined or used already, the other SPR lane will be chained
5061   // correctly, so there is nothing to be done.
5062   if (MI.definesRegister(DReg, TRI) || MI.readsRegister(DReg, TRI)) {
5063     ImplicitSReg = 0;
5064     return true;
5065   }
5066 
5067   // Otherwise we need to go searching to see if the SPR is set explicitly.
5068   ImplicitSReg = TRI->getSubReg(DReg,
5069                                 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
5070   MachineBasicBlock::LivenessQueryResult LQR =
5071       MI.getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI);
5072 
5073   if (LQR == MachineBasicBlock::LQR_Live)
5074     return true;
5075   else if (LQR == MachineBasicBlock::LQR_Unknown)
5076     return false;
5077 
5078   // If the register is known not to be live, there is no need to add an
5079   // implicit-use.
5080   ImplicitSReg = 0;
5081   return true;
5082 }
5083 
5084 void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI,
5085                                           unsigned Domain) const {
5086   unsigned DstReg, SrcReg, DReg;
5087   unsigned Lane;
5088   MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
5089   const TargetRegisterInfo *TRI = &getRegisterInfo();
5090   switch (MI.getOpcode()) {
5091   default:
5092     llvm_unreachable("cannot handle opcode!");
5093     break;
5094   case ARM::VMOVD:
5095     if (Domain != ExeNEON)
5096       break;
5097 
5098     // Zap the predicate operands.
5099     assert(!isPredicated(MI) && "Cannot predicate a VORRd");
5100 
5101     // Make sure we've got NEON instructions.
5102     assert(Subtarget.hasNEON() && "VORRd requires NEON");
5103 
5104     // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits)
5105     DstReg = MI.getOperand(0).getReg();
5106     SrcReg = MI.getOperand(1).getReg();
5107 
5108     for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5109       MI.RemoveOperand(i - 1);
5110 
5111     // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits)
5112     MI.setDesc(get(ARM::VORRd));
5113     MIB.addReg(DstReg, RegState::Define)
5114         .addReg(SrcReg)
5115         .addReg(SrcReg)
5116         .add(predOps(ARMCC::AL));
5117     break;
5118   case ARM::VMOVRS:
5119     if (Domain != ExeNEON)
5120       break;
5121     assert(!isPredicated(MI) && "Cannot predicate a VGETLN");
5122 
5123     // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits)
5124     DstReg = MI.getOperand(0).getReg();
5125     SrcReg = MI.getOperand(1).getReg();
5126 
5127     for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5128       MI.RemoveOperand(i - 1);
5129 
5130     DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);
5131 
5132     // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps)
5133     // Note that DSrc has been widened and the other lane may be undef, which
5134     // contaminates the entire register.
5135     MI.setDesc(get(ARM::VGETLNi32));
5136     MIB.addReg(DstReg, RegState::Define)
5137         .addReg(DReg, RegState::Undef)
5138         .addImm(Lane)
5139         .add(predOps(ARMCC::AL));
5140 
5141     // The old source should be an implicit use, otherwise we might think it
5142     // was dead before here.
5143     MIB.addReg(SrcReg, RegState::Implicit);
5144     break;
5145   case ARM::VMOVSR: {
5146     if (Domain != ExeNEON)
5147       break;
5148     assert(!isPredicated(MI) && "Cannot predicate a VSETLN");
5149 
5150     // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits)
5151     DstReg = MI.getOperand(0).getReg();
5152     SrcReg = MI.getOperand(1).getReg();
5153 
5154     DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);
5155 
5156     unsigned ImplicitSReg;
5157     if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg))
5158       break;
5159 
5160     for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5161       MI.RemoveOperand(i - 1);
5162 
5163     // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps)
5164     // Again DDst may be undefined at the beginning of this instruction.
5165     MI.setDesc(get(ARM::VSETLNi32));
5166     MIB.addReg(DReg, RegState::Define)
5167         .addReg(DReg, getUndefRegState(!MI.readsRegister(DReg, TRI)))
5168         .addReg(SrcReg)
5169         .addImm(Lane)
5170         .add(predOps(ARMCC::AL));
5171 
5172     // The narrower destination must be marked as set to keep previous chains
5173     // in place.
5174     MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
5175     if (ImplicitSReg != 0)
5176       MIB.addReg(ImplicitSReg, RegState::Implicit);
5177     break;
5178     }
5179     case ARM::VMOVS: {
5180       if (Domain != ExeNEON)
5181         break;
5182 
5183       // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
5184       DstReg = MI.getOperand(0).getReg();
5185       SrcReg = MI.getOperand(1).getReg();
5186 
5187       unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
5188       DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
5189       DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane);
5190 
5191       unsigned ImplicitSReg;
5192       if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg))
5193         break;
5194 
5195       for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5196         MI.RemoveOperand(i - 1);
5197 
5198       if (DSrc == DDst) {
5199         // Destination can be:
5200         //     %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
5201         MI.setDesc(get(ARM::VDUPLN32d));
5202         MIB.addReg(DDst, RegState::Define)
5203             .addReg(DDst, getUndefRegState(!MI.readsRegister(DDst, TRI)))
5204             .addImm(SrcLane)
5205             .add(predOps(ARMCC::AL));
5206 
5207         // Neither the source or the destination are naturally represented any
5208         // more, so add them in manually.
5209         MIB.addReg(DstReg, RegState::Implicit | RegState::Define);
5210         MIB.addReg(SrcReg, RegState::Implicit);
5211         if (ImplicitSReg != 0)
5212           MIB.addReg(ImplicitSReg, RegState::Implicit);
5213         break;
5214       }
5215 
5216       // In general there's no single instruction that can perform an S <-> S
5217       // move in NEON space, but a pair of VEXT instructions *can* do the
5218       // job. It turns out that the VEXTs needed will only use DSrc once, with
5219       // the position based purely on the combination of lane-0 and lane-1
5220       // involved. For example
5221       //     vmov s0, s2 -> vext.32 d0, d0, d1, #1  vext.32 d0, d0, d0, #1
5222       //     vmov s1, s3 -> vext.32 d0, d1, d0, #1  vext.32 d0, d0, d0, #1
5223       //     vmov s0, s3 -> vext.32 d0, d0, d0, #1  vext.32 d0, d1, d0, #1
5224       //     vmov s1, s2 -> vext.32 d0, d0, d0, #1  vext.32 d0, d0, d1, #1
5225       //
5226       // Pattern of the MachineInstrs is:
5227       //     %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (;implicits)
5228       MachineInstrBuilder NewMIB;
5229       NewMIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::VEXTd32),
5230                        DDst);
5231 
5232       // On the first instruction, both DSrc and DDst may be undef if present.
5233       // Specifically when the original instruction didn't have them as an
5234       // <imp-use>.
5235       unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5236       bool CurUndef = !MI.readsRegister(CurReg, TRI);
5237       NewMIB.addReg(CurReg, getUndefRegState(CurUndef));
5238 
5239       CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5240       CurUndef = !MI.readsRegister(CurReg, TRI);
5241       NewMIB.addReg(CurReg, getUndefRegState(CurUndef))
5242             .addImm(1)
5243             .add(predOps(ARMCC::AL));
5244 
5245       if (SrcLane == DstLane)
5246         NewMIB.addReg(SrcReg, RegState::Implicit);
5247 
5248       MI.setDesc(get(ARM::VEXTd32));
5249       MIB.addReg(DDst, RegState::Define);
5250 
5251       // On the second instruction, DDst has definitely been defined above, so
5252       // it is not undef. DSrc, if present, can be undef as above.
5253       CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5254       CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
5255       MIB.addReg(CurReg, getUndefRegState(CurUndef));
5256 
5257       CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5258       CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
5259       MIB.addReg(CurReg, getUndefRegState(CurUndef))
5260          .addImm(1)
5261          .add(predOps(ARMCC::AL));
5262 
5263       if (SrcLane != DstLane)
5264         MIB.addReg(SrcReg, RegState::Implicit);
5265 
5266       // As before, the original destination is no longer represented, add it
5267       // implicitly.
5268       MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
5269       if (ImplicitSReg != 0)
5270         MIB.addReg(ImplicitSReg, RegState::Implicit);
5271       break;
5272     }
5273   }
5274 }
5275 
5276 //===----------------------------------------------------------------------===//
5277 // Partial register updates
5278 //===----------------------------------------------------------------------===//
5279 //
5280 // Swift renames NEON registers with 64-bit granularity.  That means any
5281 // instruction writing an S-reg implicitly reads the containing D-reg.  The
5282 // problem is mostly avoided by translating f32 operations to v2f32 operations
5283 // on D-registers, but f32 loads are still a problem.
5284 //
5285 // These instructions can load an f32 into a NEON register:
5286 //
5287 // VLDRS - Only writes S, partial D update.
5288 // VLD1LNd32 - Writes all D-regs, explicit partial D update, 2 uops.
5289 // VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops.
5290 //
5291 // FCONSTD can be used as a dependency-breaking instruction.
5292 unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
5293     const MachineInstr &MI, unsigned OpNum,
5294     const TargetRegisterInfo *TRI) const {
5295   auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
5296   if (!PartialUpdateClearance)
5297     return 0;
5298 
5299   assert(TRI && "Need TRI instance");
5300 
5301   const MachineOperand &MO = MI.getOperand(OpNum);
5302   if (MO.readsReg())
5303     return 0;
5304   Register Reg = MO.getReg();
5305   int UseOp = -1;
5306 
5307   switch (MI.getOpcode()) {
5308   // Normal instructions writing only an S-register.
5309   case ARM::VLDRS:
5310   case ARM::FCONSTS:
5311   case ARM::VMOVSR:
5312   case ARM::VMOVv8i8:
5313   case ARM::VMOVv4i16:
5314   case ARM::VMOVv2i32:
5315   case ARM::VMOVv2f32:
5316   case ARM::VMOVv1i64:
5317     UseOp = MI.findRegisterUseOperandIdx(Reg, false, TRI);
5318     break;
5319 
5320     // Explicitly reads the dependency.
5321   case ARM::VLD1LNd32:
5322     UseOp = 3;
5323     break;
5324   default:
5325     return 0;
5326   }
5327 
5328   // If this instruction actually reads a value from Reg, there is no unwanted
5329   // dependency.
5330   if (UseOp != -1 && MI.getOperand(UseOp).readsReg())
5331     return 0;
5332 
5333   // We must be able to clobber the whole D-reg.
5334   if (Register::isVirtualRegister(Reg)) {
5335     // Virtual register must be a def undef foo:ssub_0 operand.
5336     if (!MO.getSubReg() || MI.readsVirtualRegister(Reg))
5337       return 0;
5338   } else if (ARM::SPRRegClass.contains(Reg)) {
5339     // Physical register: MI must define the full D-reg.
5340     unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
5341                                              &ARM::DPRRegClass);
5342     if (!DReg || !MI.definesRegister(DReg, TRI))
5343       return 0;
5344   }
5345 
5346   // MI has an unwanted D-register dependency.
5347   // Avoid defs in the previous N instructrions.
5348   return PartialUpdateClearance;
5349 }
5350 
5351 // Break a partial register dependency after getPartialRegUpdateClearance
5352 // returned non-zero.
5353 void ARMBaseInstrInfo::breakPartialRegDependency(
5354     MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
5355   assert(OpNum < MI.getDesc().getNumDefs() && "OpNum is not a def");
5356   assert(TRI && "Need TRI instance");
5357 
5358   const MachineOperand &MO = MI.getOperand(OpNum);
5359   Register Reg = MO.getReg();
5360   assert(Register::isPhysicalRegister(Reg) &&
5361          "Can't break virtual register dependencies.");
5362   unsigned DReg = Reg;
5363 
5364   // If MI defines an S-reg, find the corresponding D super-register.
5365   if (ARM::SPRRegClass.contains(Reg)) {
5366     DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5367     assert(TRI->isSuperRegister(Reg, DReg) && "Register enums broken");
5368   }
5369 
5370   assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps");
5371   assert(MI.definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg");
5372 
5373   // FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines
5374   // the full D-register by loading the same value to both lanes.  The
5375   // instruction is micro-coded with 2 uops, so don't do this until we can
5376   // properly schedule micro-coded instructions.  The dispatcher stalls cause
5377   // too big regressions.
5378 
5379   // Insert the dependency-breaking FCONSTD before MI.
5380   // 96 is the encoding of 0.5, but the actual value doesn't matter here.
5381   BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::FCONSTD), DReg)
5382       .addImm(96)
5383       .add(predOps(ARMCC::AL));
5384   MI.addRegisterKilled(DReg, TRI, true);
5385 }
5386 
5387 bool ARMBaseInstrInfo::hasNOP() const {
5388   return Subtarget.getFeatureBits()[ARM::HasV6KOps];
5389 }
5390 
5391 bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const {
5392   if (MI->getNumOperands() < 4)
5393     return true;
5394   unsigned ShOpVal = MI->getOperand(3).getImm();
5395   unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal);
5396   // Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1.
5397   if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) ||
5398       ((ShImm == 1 || ShImm == 2) &&
5399        ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl))
5400     return true;
5401 
5402   return false;
5403 }
5404 
5405 bool ARMBaseInstrInfo::getRegSequenceLikeInputs(
5406     const MachineInstr &MI, unsigned DefIdx,
5407     SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
5408   assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
5409   assert(MI.isRegSequenceLike() && "Invalid kind of instruction");
5410 
5411   switch (MI.getOpcode()) {
5412   case ARM::VMOVDRR:
5413     // dX = VMOVDRR rY, rZ
5414     // is the same as:
5415     // dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1
5416     // Populate the InputRegs accordingly.
5417     // rY
5418     const MachineOperand *MOReg = &MI.getOperand(1);
5419     if (!MOReg->isUndef())
5420       InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(),
5421                                               MOReg->getSubReg(), ARM::ssub_0));
5422     // rZ
5423     MOReg = &MI.getOperand(2);
5424     if (!MOReg->isUndef())
5425       InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(),
5426                                               MOReg->getSubReg(), ARM::ssub_1));
5427     return true;
5428   }
5429   llvm_unreachable("Target dependent opcode missing");
5430 }
5431 
5432 bool ARMBaseInstrInfo::getExtractSubregLikeInputs(
5433     const MachineInstr &MI, unsigned DefIdx,
5434     RegSubRegPairAndIdx &InputReg) const {
5435   assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
5436   assert(MI.isExtractSubregLike() && "Invalid kind of instruction");
5437 
5438   switch (MI.getOpcode()) {
5439   case ARM::VMOVRRD:
5440     // rX, rY = VMOVRRD dZ
5441     // is the same as:
5442     // rX = EXTRACT_SUBREG dZ, ssub_0
5443     // rY = EXTRACT_SUBREG dZ, ssub_1
5444     const MachineOperand &MOReg = MI.getOperand(2);
5445     if (MOReg.isUndef())
5446       return false;
5447     InputReg.Reg = MOReg.getReg();
5448     InputReg.SubReg = MOReg.getSubReg();
5449     InputReg.SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5450     return true;
5451   }
5452   llvm_unreachable("Target dependent opcode missing");
5453 }
5454 
5455 bool ARMBaseInstrInfo::getInsertSubregLikeInputs(
5456     const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg,
5457     RegSubRegPairAndIdx &InsertedReg) const {
5458   assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
5459   assert(MI.isInsertSubregLike() && "Invalid kind of instruction");
5460 
5461   switch (MI.getOpcode()) {
5462   case ARM::VSETLNi32:
5463   case ARM::MVE_VMOV_to_lane_32:
5464     // dX = VSETLNi32 dY, rZ, imm
5465     // qX = MVE_VMOV_to_lane_32 qY, rZ, imm
5466     const MachineOperand &MOBaseReg = MI.getOperand(1);
5467     const MachineOperand &MOInsertedReg = MI.getOperand(2);
5468     if (MOInsertedReg.isUndef())
5469       return false;
5470     const MachineOperand &MOIndex = MI.getOperand(3);
5471     BaseReg.Reg = MOBaseReg.getReg();
5472     BaseReg.SubReg = MOBaseReg.getSubReg();
5473 
5474     InsertedReg.Reg = MOInsertedReg.getReg();
5475     InsertedReg.SubReg = MOInsertedReg.getSubReg();
5476     InsertedReg.SubIdx = ARM::ssub_0 + MOIndex.getImm();
5477     return true;
5478   }
5479   llvm_unreachable("Target dependent opcode missing");
5480 }
5481 
5482 std::pair<unsigned, unsigned>
5483 ARMBaseInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
5484   const unsigned Mask = ARMII::MO_OPTION_MASK;
5485   return std::make_pair(TF & Mask, TF & ~Mask);
5486 }
5487 
5488 ArrayRef<std::pair<unsigned, const char *>>
5489 ARMBaseInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
5490   using namespace ARMII;
5491 
5492   static const std::pair<unsigned, const char *> TargetFlags[] = {
5493       {MO_LO16, "arm-lo16"}, {MO_HI16, "arm-hi16"}};
5494   return makeArrayRef(TargetFlags);
5495 }
5496 
5497 ArrayRef<std::pair<unsigned, const char *>>
5498 ARMBaseInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
5499   using namespace ARMII;
5500 
5501   static const std::pair<unsigned, const char *> TargetFlags[] = {
5502       {MO_COFFSTUB, "arm-coffstub"},
5503       {MO_GOT, "arm-got"},
5504       {MO_SBREL, "arm-sbrel"},
5505       {MO_DLLIMPORT, "arm-dllimport"},
5506       {MO_SECREL, "arm-secrel"},
5507       {MO_NONLAZY, "arm-nonlazy"}};
5508   return makeArrayRef(TargetFlags);
5509 }
5510 
5511 Optional<RegImmPair> ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI,
5512                                                       Register Reg) const {
5513   int Sign = 1;
5514   unsigned Opcode = MI.getOpcode();
5515   int64_t Offset = 0;
5516 
5517   // TODO: Handle cases where Reg is a super- or sub-register of the
5518   // destination register.
5519   const MachineOperand &Op0 = MI.getOperand(0);
5520   if (!Op0.isReg() || Reg != Op0.getReg())
5521     return None;
5522 
5523   // We describe SUBri or ADDri instructions.
5524   if (Opcode == ARM::SUBri)
5525     Sign = -1;
5526   else if (Opcode != ARM::ADDri)
5527     return None;
5528 
5529   // TODO: Third operand can be global address (usually some string). Since
5530   //       strings can be relocated we cannot calculate their offsets for
5531   //       now.
5532   if (!MI.getOperand(1).isReg() || !MI.getOperand(2).isImm())
5533     return None;
5534 
5535   Offset = MI.getOperand(2).getImm() * Sign;
5536   return RegImmPair{MI.getOperand(1).getReg(), Offset};
5537 }
5538 
5539 bool llvm::registerDefinedBetween(unsigned Reg,
5540                                   MachineBasicBlock::iterator From,
5541                                   MachineBasicBlock::iterator To,
5542                                   const TargetRegisterInfo *TRI) {
5543   for (auto I = From; I != To; ++I)
5544     if (I->modifiesRegister(Reg, TRI))
5545       return true;
5546   return false;
5547 }
5548 
5549 MachineInstr *llvm::findCMPToFoldIntoCBZ(MachineInstr *Br,
5550                                          const TargetRegisterInfo *TRI) {
5551   // Search backwards to the instruction that defines CSPR. This may or not
5552   // be a CMP, we check that after this loop. If we find another instruction
5553   // that reads cpsr, we return nullptr.
5554   MachineBasicBlock::iterator CmpMI = Br;
5555   while (CmpMI != Br->getParent()->begin()) {
5556     --CmpMI;
5557     if (CmpMI->modifiesRegister(ARM::CPSR, TRI))
5558       break;
5559     if (CmpMI->readsRegister(ARM::CPSR, TRI))
5560       break;
5561   }
5562 
5563   // Check that this inst is a CMP r[0-7], #0 and that the register
5564   // is not redefined between the cmp and the br.
5565   if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5566     return nullptr;
5567   Register Reg = CmpMI->getOperand(0).getReg();
5568   Register PredReg;
5569   ARMCC::CondCodes Pred = getInstrPredicate(*CmpMI, PredReg);
5570   if (Pred != ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5571     return nullptr;
5572   if (!isARMLowRegister(Reg))
5573     return nullptr;
5574   if (registerDefinedBetween(Reg, CmpMI->getNextNode(), Br, TRI))
5575     return nullptr;
5576 
5577   return &*CmpMI;
5578 }
5579 
5580 unsigned llvm::ConstantMaterializationCost(unsigned Val,
5581                                            const ARMSubtarget *Subtarget,
5582                                            bool ForCodesize) {
5583   if (Subtarget->isThumb()) {
5584     if (Val <= 255) // MOV
5585       return ForCodesize ? 2 : 1;
5586     if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||                    // MOV
5587                                     ARM_AM::getT2SOImmVal(Val) != -1 || // MOVW
5588                                     ARM_AM::getT2SOImmVal(~Val) != -1)) // MVN
5589       return ForCodesize ? 4 : 1;
5590     if (Val <= 510) // MOV + ADDi8
5591       return ForCodesize ? 4 : 2;
5592     if (~Val <= 255) // MOV + MVN
5593       return ForCodesize ? 4 : 2;
5594     if (ARM_AM::isThumbImmShiftedVal(Val)) // MOV + LSL
5595       return ForCodesize ? 4 : 2;
5596   } else {
5597     if (ARM_AM::getSOImmVal(Val) != -1) // MOV
5598       return ForCodesize ? 4 : 1;
5599     if (ARM_AM::getSOImmVal(~Val) != -1) // MVN
5600       return ForCodesize ? 4 : 1;
5601     if (Subtarget->hasV6T2Ops() && Val <= 0xffff) // MOVW
5602       return ForCodesize ? 4 : 1;
5603     if (ARM_AM::isSOImmTwoPartVal(Val)) // two instrs
5604       return ForCodesize ? 8 : 2;
5605     if (ARM_AM::isSOImmTwoPartValNeg(Val)) // two instrs
5606       return ForCodesize ? 8 : 2;
5607   }
5608   if (Subtarget->useMovt()) // MOVW + MOVT
5609     return ForCodesize ? 8 : 2;
5610   return ForCodesize ? 8 : 3; // Literal pool load
5611 }
5612 
5613 bool llvm::HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2,
5614                                                const ARMSubtarget *Subtarget,
5615                                                bool ForCodesize) {
5616   // Check with ForCodesize
5617   unsigned Cost1 = ConstantMaterializationCost(Val1, Subtarget, ForCodesize);
5618   unsigned Cost2 = ConstantMaterializationCost(Val2, Subtarget, ForCodesize);
5619   if (Cost1 < Cost2)
5620     return true;
5621   if (Cost1 > Cost2)
5622     return false;
5623 
5624   // If they are equal, try with !ForCodesize
5625   return ConstantMaterializationCost(Val1, Subtarget, !ForCodesize) <
5626          ConstantMaterializationCost(Val2, Subtarget, !ForCodesize);
5627 }
5628 
5629 /// Constants defining how certain sequences should be outlined.
5630 /// This encompasses how an outlined function should be called, and what kind of
5631 /// frame should be emitted for that outlined function.
5632 ///
5633 /// \p MachineOutlinerTailCall implies that the function is being created from
5634 /// a sequence of instructions ending in a return.
5635 ///
5636 /// That is,
5637 ///
5638 /// I1                                OUTLINED_FUNCTION:
5639 /// I2    --> B OUTLINED_FUNCTION     I1
5640 /// BX LR                             I2
5641 ///                                   BX LR
5642 ///
5643 /// +-------------------------+--------+-----+
5644 /// |                         | Thumb2 | ARM |
5645 /// +-------------------------+--------+-----+
5646 /// | Call overhead in Bytes  |      4 |   4 |
5647 /// | Frame overhead in Bytes |      0 |   0 |
5648 /// | Stack fixup required    |     No |  No |
5649 /// +-------------------------+--------+-----+
5650 ///
5651 /// \p MachineOutlinerThunk implies that the function is being created from
5652 /// a sequence of instructions ending in a call. The outlined function is
5653 /// called with a BL instruction, and the outlined function tail-calls the
5654 /// original call destination.
5655 ///
5656 /// That is,
5657 ///
5658 /// I1                                OUTLINED_FUNCTION:
5659 /// I2   --> BL OUTLINED_FUNCTION     I1
5660 /// BL f                              I2
5661 ///                                   B f
5662 ///
5663 /// +-------------------------+--------+-----+
5664 /// |                         | Thumb2 | ARM |
5665 /// +-------------------------+--------+-----+
5666 /// | Call overhead in Bytes  |      4 |   4 |
5667 /// | Frame overhead in Bytes |      0 |   0 |
5668 /// | Stack fixup required    |     No |  No |
5669 /// +-------------------------+--------+-----+
5670 ///
5671 /// \p MachineOutlinerNoLRSave implies that the function should be called using
5672 /// a BL instruction, but doesn't require LR to be saved and restored. This
5673 /// happens when LR is known to be dead.
5674 ///
5675 /// That is,
5676 ///
5677 /// I1                                OUTLINED_FUNCTION:
5678 /// I2 --> BL OUTLINED_FUNCTION       I1
5679 /// I3                                I2
5680 ///                                   I3
5681 ///                                   BX LR
5682 ///
5683 /// +-------------------------+--------+-----+
5684 /// |                         | Thumb2 | ARM |
5685 /// +-------------------------+--------+-----+
5686 /// | Call overhead in Bytes  |      4 |   4 |
5687 /// | Frame overhead in Bytes |      4 |   4 |
5688 /// | Stack fixup required    |     No |  No |
5689 /// +-------------------------+--------+-----+
5690 ///
5691 /// \p MachineOutlinerRegSave implies that the function should be called with a
5692 /// save and restore of LR to an available register. This allows us to avoid
5693 /// stack fixups. Note that this outlining variant is compatible with the
5694 /// NoLRSave case.
5695 ///
5696 /// That is,
5697 ///
5698 /// I1     Save LR                    OUTLINED_FUNCTION:
5699 /// I2 --> BL OUTLINED_FUNCTION       I1
5700 /// I3     Restore LR                 I2
5701 ///                                   I3
5702 ///                                   BX LR
5703 ///
5704 /// +-------------------------+--------+-----+
5705 /// |                         | Thumb2 | ARM |
5706 /// +-------------------------+--------+-----+
5707 /// | Call overhead in Bytes  |      8 |  12 |
5708 /// | Frame overhead in Bytes |      2 |   4 |
5709 /// | Stack fixup required    |     No |  No |
5710 /// +-------------------------+--------+-----+
5711 ///
5712 /// \p MachineOutlinerDefault implies that the function should be called with
5713 /// a save and restore of LR to the stack.
5714 ///
5715 /// That is,
5716 ///
5717 /// I1     Save LR                    OUTLINED_FUNCTION:
5718 /// I2 --> BL OUTLINED_FUNCTION       I1
5719 /// I3     Restore LR                 I2
5720 ///                                   I3
5721 ///                                   BX LR
5722 ///
5723 /// +-------------------------+--------+-----+
5724 /// |                         | Thumb2 | ARM |
5725 /// +-------------------------+--------+-----+
5726 /// | Call overhead in Bytes  |      8 |  12 |
5727 /// | Frame overhead in Bytes |      2 |   4 |
5728 /// | Stack fixup required    |    Yes | Yes |
5729 /// +-------------------------+--------+-----+
5730 
5731 enum MachineOutlinerClass {
5732   MachineOutlinerTailCall,
5733   MachineOutlinerThunk,
5734   MachineOutlinerNoLRSave,
5735   MachineOutlinerRegSave,
5736   MachineOutlinerDefault
5737 };
5738 
5739 enum MachineOutlinerMBBFlags {
5740   LRUnavailableSomewhere = 0x2,
5741   HasCalls = 0x4,
5742   UnsafeRegsDead = 0x8
5743 };
5744 
5745 struct OutlinerCosts {
5746   const int CallTailCall;
5747   const int FrameTailCall;
5748   const int CallThunk;
5749   const int FrameThunk;
5750   const int CallNoLRSave;
5751   const int FrameNoLRSave;
5752   const int CallRegSave;
5753   const int FrameRegSave;
5754   const int CallDefault;
5755   const int FrameDefault;
5756   const int SaveRestoreLROnStack;
5757 
5758   OutlinerCosts(const ARMSubtarget &target)
5759       : CallTailCall(target.isThumb() ? 4 : 4),
5760         FrameTailCall(target.isThumb() ? 0 : 0),
5761         CallThunk(target.isThumb() ? 4 : 4),
5762         FrameThunk(target.isThumb() ? 0 : 0),
5763         CallNoLRSave(target.isThumb() ? 4 : 4),
5764         FrameNoLRSave(target.isThumb() ? 4 : 4),
5765         CallRegSave(target.isThumb() ? 8 : 12),
5766         FrameRegSave(target.isThumb() ? 2 : 4),
5767         CallDefault(target.isThumb() ? 8 : 12),
5768         FrameDefault(target.isThumb() ? 2 : 4),
5769         SaveRestoreLROnStack(target.isThumb() ? 8 : 8) {}
5770 };
5771 
5772 unsigned
5773 ARMBaseInstrInfo::findRegisterToSaveLRTo(const outliner::Candidate &C) const {
5774   assert(C.LRUWasSet && "LRU wasn't set?");
5775   MachineFunction *MF = C.getMF();
5776   const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo *>(
5777       MF->getSubtarget().getRegisterInfo());
5778 
5779   BitVector regsReserved = ARI->getReservedRegs(*MF);
5780   // Check if there is an available register across the sequence that we can
5781   // use.
5782   for (unsigned Reg : ARM::rGPRRegClass) {
5783     if (!(Reg < regsReserved.size() && regsReserved.test(Reg)) &&
5784         Reg != ARM::LR &&  // LR is not reserved, but don't use it.
5785         Reg != ARM::R12 && // R12 is not guaranteed to be preserved.
5786         C.LRU.available(Reg) && C.UsedInSequence.available(Reg))
5787       return Reg;
5788   }
5789 
5790   // No suitable register. Return 0.
5791   return 0u;
5792 }
5793 
5794 // Compute liveness of LR at the point after the interval [I, E), which
5795 // denotes a *backward* iteration through instructions. Used only for return
5796 // basic blocks, which do not end with a tail call.
5797 static bool isLRAvailable(const TargetRegisterInfo &TRI,
5798                           MachineBasicBlock::reverse_iterator I,
5799                           MachineBasicBlock::reverse_iterator E) {
5800   // At the end of the function LR dead.
5801   bool Live = false;
5802   for (; I != E; ++I) {
5803     const MachineInstr &MI = *I;
5804 
5805     // Check defs of LR.
5806     if (MI.modifiesRegister(ARM::LR, &TRI))
5807       Live = false;
5808 
5809     // Check uses of LR.
5810     unsigned Opcode = MI.getOpcode();
5811     if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5812         Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5813         Opcode == ARM::tBXNS_RET) {
5814       // These instructions use LR, but it's not an (explicit or implicit)
5815       // operand.
5816       Live = true;
5817       continue;
5818     }
5819     if (MI.readsRegister(ARM::LR, &TRI))
5820       Live = true;
5821   }
5822   return !Live;
5823 }
5824 
5825 outliner::OutlinedFunction ARMBaseInstrInfo::getOutliningCandidateInfo(
5826     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
5827   outliner::Candidate &FirstCand = RepeatedSequenceLocs[0];
5828   unsigned SequenceSize =
5829       std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0,
5830                       [this](unsigned Sum, const MachineInstr &MI) {
5831                         return Sum + getInstSizeInBytes(MI);
5832                       });
5833 
5834   // Properties about candidate MBBs that hold for all of them.
5835   unsigned FlagsSetInAll = 0xF;
5836 
5837   // Compute liveness information for each candidate, and set FlagsSetInAll.
5838   const TargetRegisterInfo &TRI = getRegisterInfo();
5839   std::for_each(
5840       RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
5841       [&FlagsSetInAll](outliner::Candidate &C) { FlagsSetInAll &= C.Flags; });
5842 
5843   // According to the ARM Procedure Call Standard, the following are
5844   // undefined on entry/exit from a function call:
5845   //
5846   // * Register R12(IP),
5847   // * Condition codes (and thus the CPSR register)
5848   //
5849   // Since we control the instructions which are part of the outlined regions
5850   // we don't need to be fully compliant with the AAPCS, but we have to
5851   // guarantee that if a veneer is inserted at link time the code is still
5852   // correct.  Because of this, we can't outline any sequence of instructions
5853   // where one of these registers is live into/across it. Thus, we need to
5854   // delete those candidates.
5855   auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) {
5856     // If the unsafe registers in this block are all dead, then we don't need
5857     // to compute liveness here.
5858     if (C.Flags & UnsafeRegsDead)
5859       return false;
5860     C.initLRU(TRI);
5861     LiveRegUnits LRU = C.LRU;
5862     return (!LRU.available(ARM::R12) || !LRU.available(ARM::CPSR));
5863   };
5864 
5865   // Are there any candidates where those registers are live?
5866   if (!(FlagsSetInAll & UnsafeRegsDead)) {
5867     // Erase every candidate that violates the restrictions above. (It could be
5868     // true that we have viable candidates, so it's not worth bailing out in
5869     // the case that, say, 1 out of 20 candidates violate the restructions.)
5870     llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5871 
5872     // If the sequence doesn't have enough candidates left, then we're done.
5873     if (RepeatedSequenceLocs.size() < 2)
5874       return outliner::OutlinedFunction();
5875   }
5876 
5877   // At this point, we have only "safe" candidates to outline. Figure out
5878   // frame + call instruction information.
5879 
5880   unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode();
5881 
5882   // Helper lambda which sets call information for every candidate.
5883   auto SetCandidateCallInfo =
5884       [&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) {
5885         for (outliner::Candidate &C : RepeatedSequenceLocs)
5886           C.setCallInfo(CallID, NumBytesForCall);
5887       };
5888 
5889   OutlinerCosts Costs(Subtarget);
5890   unsigned FrameID = MachineOutlinerDefault;
5891   unsigned NumBytesToCreateFrame = Costs.FrameDefault;
5892 
5893   // If the last instruction in any candidate is a terminator, then we should
5894   // tail call all of the candidates.
5895   if (RepeatedSequenceLocs[0].back()->isTerminator()) {
5896     FrameID = MachineOutlinerTailCall;
5897     NumBytesToCreateFrame = Costs.FrameTailCall;
5898     SetCandidateCallInfo(MachineOutlinerTailCall, Costs.CallTailCall);
5899   } else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
5900              LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL ||
5901              LastInstrOpcode == ARM::tBLXr ||
5902              LastInstrOpcode == ARM::tBLXr_noip ||
5903              LastInstrOpcode == ARM::tBLXi) {
5904     FrameID = MachineOutlinerThunk;
5905     NumBytesToCreateFrame = Costs.FrameThunk;
5906     SetCandidateCallInfo(MachineOutlinerThunk, Costs.CallThunk);
5907   } else {
5908     // We need to decide how to emit calls + frames. We can always emit the same
5909     // frame if we don't need to save to the stack. If we have to save to the
5910     // stack, then we need a different frame.
5911     unsigned NumBytesNoStackCalls = 0;
5912     std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
5913 
5914     for (outliner::Candidate &C : RepeatedSequenceLocs) {
5915       C.initLRU(TRI);
5916       // LR liveness is overestimated in return blocks, unless they end with a
5917       // tail call.
5918       const auto Last = C.getMBB()->rbegin();
5919       const bool LRIsAvailable =
5920           C.getMBB()->isReturnBlock() && !Last->isCall()
5921               ? isLRAvailable(TRI, Last,
5922                               (MachineBasicBlock::reverse_iterator)C.front())
5923               : C.LRU.available(ARM::LR);
5924       if (LRIsAvailable) {
5925         FrameID = MachineOutlinerNoLRSave;
5926         NumBytesNoStackCalls += Costs.CallNoLRSave;
5927         C.setCallInfo(MachineOutlinerNoLRSave, Costs.CallNoLRSave);
5928         CandidatesWithoutStackFixups.push_back(C);
5929       }
5930 
5931       // Is an unused register available? If so, we won't modify the stack, so
5932       // we can outline with the same frame type as those that don't save LR.
5933       else if (findRegisterToSaveLRTo(C)) {
5934         FrameID = MachineOutlinerRegSave;
5935         NumBytesNoStackCalls += Costs.CallRegSave;
5936         C.setCallInfo(MachineOutlinerRegSave, Costs.CallRegSave);
5937         CandidatesWithoutStackFixups.push_back(C);
5938       }
5939 
5940       // Is SP used in the sequence at all? If not, we don't have to modify
5941       // the stack, so we are guaranteed to get the same frame.
5942       else if (C.UsedInSequence.available(ARM::SP)) {
5943         NumBytesNoStackCalls += Costs.CallDefault;
5944         C.setCallInfo(MachineOutlinerDefault, Costs.CallDefault);
5945         CandidatesWithoutStackFixups.push_back(C);
5946       }
5947 
5948       // If we outline this, we need to modify the stack. Pretend we don't
5949       // outline this by saving all of its bytes.
5950       else
5951         NumBytesNoStackCalls += SequenceSize;
5952     }
5953 
5954     // If there are no places where we have to save LR, then note that we don't
5955     // have to update the stack. Otherwise, give every candidate the default
5956     // call type
5957     if (NumBytesNoStackCalls <=
5958         RepeatedSequenceLocs.size() * Costs.CallDefault) {
5959       RepeatedSequenceLocs = CandidatesWithoutStackFixups;
5960       FrameID = MachineOutlinerNoLRSave;
5961     } else
5962       SetCandidateCallInfo(MachineOutlinerDefault, Costs.CallDefault);
5963   }
5964 
5965   // Does every candidate's MBB contain a call?  If so, then we might have a
5966   // call in the range.
5967   if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
5968     // check if the range contains a call.  These require a save + restore of
5969     // the link register.
5970     if (std::any_of(FirstCand.front(), FirstCand.back(),
5971                     [](const MachineInstr &MI) { return MI.isCall(); }))
5972       NumBytesToCreateFrame += Costs.SaveRestoreLROnStack;
5973 
5974     // Handle the last instruction separately.  If it is tail call, then the
5975     // last instruction is a call, we don't want to save + restore in this
5976     // case.  However, it could be possible that the last instruction is a
5977     // call without it being valid to tail call this sequence.  We should
5978     // consider this as well.
5979     else if (FrameID != MachineOutlinerThunk &&
5980              FrameID != MachineOutlinerTailCall && FirstCand.back()->isCall())
5981       NumBytesToCreateFrame += Costs.SaveRestoreLROnStack;
5982   }
5983 
5984   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
5985                                     NumBytesToCreateFrame, FrameID);
5986 }
5987 
5988 bool ARMBaseInstrInfo::checkAndUpdateStackOffset(MachineInstr *MI,
5989                                                  int64_t Fixup,
5990                                                  bool Updt) const {
5991   int SPIdx = MI->findRegisterUseOperandIdx(ARM::SP);
5992   unsigned AddrMode = (MI->getDesc().TSFlags & ARMII::AddrModeMask);
5993   if (SPIdx < 0)
5994     // No SP operand
5995     return true;
5996   else if (SPIdx != 1 && (AddrMode != ARMII::AddrModeT2_i8s4 || SPIdx != 2))
5997     // If SP is not the base register we can't do much
5998     return false;
5999 
6000   // Stack might be involved but addressing mode doesn't handle any offset.
6001   // Rq: AddrModeT1_[1|2|4] don't operate on SP
6002   if (AddrMode == ARMII::AddrMode1        // Arithmetic instructions
6003       || AddrMode == ARMII::AddrMode4     // Load/Store Multiple
6004       || AddrMode == ARMII::AddrMode6     // Neon Load/Store Multiple
6005       || AddrMode == ARMII::AddrModeT2_so // SP can't be used as based register
6006       || AddrMode == ARMII::AddrModeT2_pc // PCrel access
6007       || AddrMode == ARMII::AddrMode2     // Used by PRE and POST indexed LD/ST
6008       || AddrMode == ARMII::AddrModeT2_i7 // v8.1-M MVE
6009       || AddrMode == ARMII::AddrModeT2_i7s2 // v8.1-M MVE
6010       || AddrMode == ARMII::AddrModeT2_i7s4 // v8.1-M sys regs VLDR/VSTR
6011       || AddrMode == ARMII::AddrModeNone)
6012     return false;
6013 
6014   unsigned NumOps = MI->getDesc().getNumOperands();
6015   unsigned ImmIdx = NumOps - 3;
6016 
6017   const MachineOperand &Offset = MI->getOperand(ImmIdx);
6018   assert(Offset.isImm() && "Is not an immediate");
6019   int64_t OffVal = Offset.getImm();
6020 
6021   if (OffVal < 0)
6022     // Don't override data if the are below SP.
6023     return false;
6024 
6025   unsigned NumBits = 0;
6026   unsigned Scale = 1;
6027 
6028   switch (AddrMode) {
6029   case ARMII::AddrMode3:
6030     if (ARM_AM::getAM3Op(OffVal) == ARM_AM::sub)
6031       return false;
6032     OffVal = ARM_AM::getAM3Offset(OffVal);
6033     NumBits = 8;
6034     break;
6035   case ARMII::AddrMode5:
6036     if (ARM_AM::getAM5Op(OffVal) == ARM_AM::sub)
6037       return false;
6038     OffVal = ARM_AM::getAM5Offset(OffVal);
6039     NumBits = 8;
6040     Scale = 4;
6041     break;
6042   case ARMII::AddrMode5FP16:
6043     if (ARM_AM::getAM5FP16Op(OffVal) == ARM_AM::sub)
6044       return false;
6045     OffVal = ARM_AM::getAM5FP16Offset(OffVal);
6046     NumBits = 8;
6047     Scale = 2;
6048     break;
6049   case ARMII::AddrModeT2_i8:
6050     NumBits = 8;
6051     break;
6052   case ARMII::AddrModeT2_i8s4:
6053     // FIXME: Values are already scaled in this addressing mode.
6054     assert((Fixup & 3) == 0 && "Can't encode this offset!");
6055     NumBits = 10;
6056     break;
6057   case ARMII::AddrModeT2_ldrex:
6058     NumBits = 8;
6059     Scale = 4;
6060     break;
6061   case ARMII::AddrModeT2_i12:
6062   case ARMII::AddrMode_i12:
6063     NumBits = 12;
6064     break;
6065   case ARMII::AddrModeT1_s: // SP-relative LD/ST
6066     NumBits = 8;
6067     Scale = 4;
6068     break;
6069   default:
6070     llvm_unreachable("Unsupported addressing mode!");
6071   }
6072   // Make sure the offset is encodable for instructions that scale the
6073   // immediate.
6074   assert(((OffVal * Scale + Fixup) & (Scale - 1)) == 0 &&
6075          "Can't encode this offset!");
6076   OffVal += Fixup / Scale;
6077 
6078   unsigned Mask = (1 << NumBits) - 1;
6079 
6080   if (OffVal <= Mask) {
6081     if (Updt)
6082       MI->getOperand(ImmIdx).setImm(OffVal);
6083     return true;
6084   }
6085 
6086   return false;
6087 
6088 }
6089 
6090 bool ARMBaseInstrInfo::isFunctionSafeToOutlineFrom(
6091     MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
6092   const Function &F = MF.getFunction();
6093 
6094   // Can F be deduplicated by the linker? If it can, don't outline from it.
6095   if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
6096     return false;
6097 
6098   // Don't outline from functions with section markings; the program could
6099   // expect that all the code is in the named section.
6100   // FIXME: Allow outlining from multiple functions with the same section
6101   // marking.
6102   if (F.hasSection())
6103     return false;
6104 
6105   // FIXME: Thumb1 outlining is not handled
6106   if (MF.getInfo<ARMFunctionInfo>()->isThumb1OnlyFunction())
6107     return false;
6108 
6109   // It's safe to outline from MF.
6110   return true;
6111 }
6112 
6113 bool ARMBaseInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
6114                                               unsigned &Flags) const {
6115   // Check if LR is available through all of the MBB. If it's not, then set
6116   // a flag.
6117   assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
6118          "Suitable Machine Function for outlining must track liveness");
6119 
6120   LiveRegUnits LRU(getRegisterInfo());
6121 
6122   std::for_each(MBB.rbegin(), MBB.rend(),
6123                 [&LRU](MachineInstr &MI) { LRU.accumulate(MI); });
6124 
6125   // Check if each of the unsafe registers are available...
6126   bool R12AvailableInBlock = LRU.available(ARM::R12);
6127   bool CPSRAvailableInBlock = LRU.available(ARM::CPSR);
6128 
6129   // If all of these are dead (and not live out), we know we don't have to check
6130   // them later.
6131   if (R12AvailableInBlock && CPSRAvailableInBlock)
6132     Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
6133 
6134   // Now, add the live outs to the set.
6135   LRU.addLiveOuts(MBB);
6136 
6137   // If any of these registers is available in the MBB, but also a live out of
6138   // the block, then we know outlining is unsafe.
6139   if (R12AvailableInBlock && !LRU.available(ARM::R12))
6140     return false;
6141   if (CPSRAvailableInBlock && !LRU.available(ARM::CPSR))
6142     return false;
6143 
6144   // Check if there's a call inside this MachineBasicBlock.  If there is, then
6145   // set a flag.
6146   if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); }))
6147     Flags |= MachineOutlinerMBBFlags::HasCalls;
6148 
6149   // LR liveness is overestimated in return blocks.
6150 
6151   bool LRIsAvailable =
6152       MBB.isReturnBlock() && !MBB.back().isCall()
6153           ? isLRAvailable(getRegisterInfo(), MBB.rbegin(), MBB.rend())
6154           : LRU.available(ARM::LR);
6155   if (!LRIsAvailable)
6156     Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
6157 
6158   return true;
6159 }
6160 
6161 outliner::InstrType
6162 ARMBaseInstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
6163                                    unsigned Flags) const {
6164   MachineInstr &MI = *MIT;
6165   const TargetRegisterInfo *TRI = &getRegisterInfo();
6166 
6167   // Be conservative with inline ASM
6168   if (MI.isInlineAsm())
6169     return outliner::InstrType::Illegal;
6170 
6171   // Don't allow debug values to impact outlining type.
6172   if (MI.isDebugInstr() || MI.isIndirectDebugValue())
6173     return outliner::InstrType::Invisible;
6174 
6175   // At this point, KILL or IMPLICIT_DEF instructions don't really tell us much
6176   // so we can go ahead and skip over them.
6177   if (MI.isKill() || MI.isImplicitDef())
6178     return outliner::InstrType::Invisible;
6179 
6180   // PIC instructions contain labels, outlining them would break offset
6181   // computing.  unsigned Opc = MI.getOpcode();
6182   unsigned Opc = MI.getOpcode();
6183   if (Opc == ARM::tPICADD || Opc == ARM::PICADD || Opc == ARM::PICSTR ||
6184       Opc == ARM::PICSTRB || Opc == ARM::PICSTRH || Opc == ARM::PICLDR ||
6185       Opc == ARM::PICLDRB || Opc == ARM::PICLDRH || Opc == ARM::PICLDRSB ||
6186       Opc == ARM::PICLDRSH || Opc == ARM::t2LDRpci_pic ||
6187       Opc == ARM::t2MOVi16_ga_pcrel || Opc == ARM::t2MOVTi16_ga_pcrel ||
6188       Opc == ARM::t2MOV_ga_pcrel)
6189     return outliner::InstrType::Illegal;
6190 
6191   // Be conservative with ARMv8.1 MVE instructions.
6192   if (Opc == ARM::t2BF_LabelPseudo || Opc == ARM::t2DoLoopStart ||
6193       Opc == ARM::t2DoLoopStartTP || Opc == ARM::t2WhileLoopStart ||
6194       Opc == ARM::t2WhileLoopStartLR || Opc == ARM::t2WhileLoopStartTP ||
6195       Opc == ARM::t2LoopDec || Opc == ARM::t2LoopEnd ||
6196       Opc == ARM::t2LoopEndDec)
6197     return outliner::InstrType::Illegal;
6198 
6199   const MCInstrDesc &MCID = MI.getDesc();
6200   uint64_t MIFlags = MCID.TSFlags;
6201   if ((MIFlags & ARMII::DomainMask) == ARMII::DomainMVE)
6202     return outliner::InstrType::Illegal;
6203 
6204   // Is this a terminator for a basic block?
6205   if (MI.isTerminator()) {
6206     // Don't outline if the branch is not unconditional.
6207     if (isPredicated(MI))
6208       return outliner::InstrType::Illegal;
6209 
6210     // Is this the end of a function?
6211     if (MI.getParent()->succ_empty())
6212       return outliner::InstrType::Legal;
6213 
6214     // It's not, so don't outline it.
6215     return outliner::InstrType::Illegal;
6216   }
6217 
6218   // Make sure none of the operands are un-outlinable.
6219   for (const MachineOperand &MOP : MI.operands()) {
6220     if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
6221         MOP.isTargetIndex())
6222       return outliner::InstrType::Illegal;
6223   }
6224 
6225   // Don't outline if link register or program counter value are used.
6226   if (MI.readsRegister(ARM::LR, TRI) || MI.readsRegister(ARM::PC, TRI))
6227     return outliner::InstrType::Illegal;
6228 
6229   if (MI.isCall()) {
6230     // Get the function associated with the call.  Look at each operand and find
6231     // the one that represents the calle and get its name.
6232     const Function *Callee = nullptr;
6233     for (const MachineOperand &MOP : MI.operands()) {
6234       if (MOP.isGlobal()) {
6235         Callee = dyn_cast<Function>(MOP.getGlobal());
6236         break;
6237       }
6238     }
6239 
6240     // Dont't outline calls to "mcount" like functions, in particular Linux
6241     // kernel function tracing relies on it.
6242     if (Callee &&
6243         (Callee->getName() == "\01__gnu_mcount_nc" ||
6244          Callee->getName() == "\01mcount" || Callee->getName() == "__mcount"))
6245       return outliner::InstrType::Illegal;
6246 
6247     // If we don't know anything about the callee, assume it depends on the
6248     // stack layout of the caller. In that case, it's only legal to outline
6249     // as a tail-call. Explicitly list the call instructions we know about so
6250     // we don't get unexpected results with call pseudo-instructions.
6251     auto UnknownCallOutlineType = outliner::InstrType::Illegal;
6252     if (Opc == ARM::BL || Opc == ARM::tBL || Opc == ARM::BLX ||
6253         Opc == ARM::BLX_noip || Opc == ARM::tBLXr || Opc == ARM::tBLXr_noip ||
6254         Opc == ARM::tBLXi)
6255       UnknownCallOutlineType = outliner::InstrType::LegalTerminator;
6256 
6257     if (!Callee)
6258       return UnknownCallOutlineType;
6259 
6260     // We have a function we have information about.  Check if it's something we
6261     // can safely outline.
6262     MachineFunction *MF = MI.getParent()->getParent();
6263     MachineFunction *CalleeMF = MF->getMMI().getMachineFunction(*Callee);
6264 
6265     // We don't know what's going on with the callee at all.  Don't touch it.
6266     if (!CalleeMF)
6267       return UnknownCallOutlineType;
6268 
6269     // Check if we know anything about the callee saves on the function. If we
6270     // don't, then don't touch it, since that implies that we haven't computed
6271     // anything about its stack frame yet.
6272     MachineFrameInfo &MFI = CalleeMF->getFrameInfo();
6273     if (!MFI.isCalleeSavedInfoValid() || MFI.getStackSize() > 0 ||
6274         MFI.getNumObjects() > 0)
6275       return UnknownCallOutlineType;
6276 
6277     // At this point, we can say that CalleeMF ought to not pass anything on the
6278     // stack. Therefore, we can outline it.
6279     return outliner::InstrType::Legal;
6280   }
6281 
6282   // Since calls are handled, don't touch LR or PC
6283   if (MI.modifiesRegister(ARM::LR, TRI) || MI.modifiesRegister(ARM::PC, TRI))
6284     return outliner::InstrType::Illegal;
6285 
6286   // Does this use the stack?
6287   if (MI.modifiesRegister(ARM::SP, TRI) || MI.readsRegister(ARM::SP, TRI)) {
6288     // True if there is no chance that any outlined candidate from this range
6289     // could require stack fixups. That is, both
6290     // * LR is available in the range (No save/restore around call)
6291     // * The range doesn't include calls (No save/restore in outlined frame)
6292     // are true.
6293     // FIXME: This is very restrictive; the flags check the whole block,
6294     // not just the bit we will try to outline.
6295     bool MightNeedStackFixUp =
6296         (Flags & (MachineOutlinerMBBFlags::LRUnavailableSomewhere |
6297                   MachineOutlinerMBBFlags::HasCalls));
6298 
6299     if (!MightNeedStackFixUp)
6300       return outliner::InstrType::Legal;
6301 
6302     // Any modification of SP will break our code to save/restore LR.
6303     // FIXME: We could handle some instructions which add a constant offset to
6304     // SP, with a bit more work.
6305     if (MI.modifiesRegister(ARM::SP, TRI))
6306       return outliner::InstrType::Illegal;
6307 
6308     // At this point, we have a stack instruction that we might need to fix up.
6309     // up. We'll handle it if it's a load or store.
6310     if (checkAndUpdateStackOffset(&MI, Subtarget.getStackAlignment().value(),
6311                                   false))
6312       return outliner::InstrType::Legal;
6313 
6314     // We can't fix it up, so don't outline it.
6315     return outliner::InstrType::Illegal;
6316   }
6317 
6318   // Be conservative with IT blocks.
6319   if (MI.readsRegister(ARM::ITSTATE, TRI) ||
6320       MI.modifiesRegister(ARM::ITSTATE, TRI))
6321     return outliner::InstrType::Illegal;
6322 
6323   // Don't outline positions.
6324   if (MI.isPosition())
6325     return outliner::InstrType::Illegal;
6326 
6327   return outliner::InstrType::Legal;
6328 }
6329 
6330 void ARMBaseInstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
6331   for (MachineInstr &MI : MBB) {
6332     checkAndUpdateStackOffset(&MI, Subtarget.getStackAlignment().value(), true);
6333   }
6334 }
6335 
6336 void ARMBaseInstrInfo::saveLROnStack(MachineBasicBlock &MBB,
6337                                      MachineBasicBlock::iterator It) const {
6338   unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6339   int Align = -Subtarget.getStackAlignment().value();
6340   BuildMI(MBB, It, DebugLoc(), get(Opc), ARM::SP)
6341     .addReg(ARM::LR, RegState::Kill)
6342     .addReg(ARM::SP)
6343     .addImm(Align)
6344     .add(predOps(ARMCC::AL));
6345 }
6346 
6347 void ARMBaseInstrInfo::emitCFIForLRSaveOnStack(
6348     MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {
6349   MachineFunction &MF = *MBB.getParent();
6350   const MCRegisterInfo *MRI = Subtarget.getRegisterInfo();
6351   unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true);
6352   int Align = Subtarget.getStackAlignment().value();
6353   // Add a CFI saying the stack was moved down.
6354   int64_t StackPosEntry =
6355       MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, Align));
6356   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6357       .addCFIIndex(StackPosEntry)
6358       .setMIFlags(MachineInstr::FrameSetup);
6359 
6360   // Add a CFI saying that the LR that we want to find is now higher than
6361   // before.
6362   int64_t LRPosEntry =
6363       MF.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfLR, -Align));
6364   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6365       .addCFIIndex(LRPosEntry)
6366       .setMIFlags(MachineInstr::FrameSetup);
6367 }
6368 
6369 void ARMBaseInstrInfo::emitCFIForLRSaveToReg(MachineBasicBlock &MBB,
6370                                              MachineBasicBlock::iterator It,
6371                                              Register Reg) const {
6372   MachineFunction &MF = *MBB.getParent();
6373   const MCRegisterInfo *MRI = Subtarget.getRegisterInfo();
6374   unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true);
6375   unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
6376 
6377   int64_t LRPosEntry = MF.addFrameInst(
6378       MCCFIInstruction::createRegister(nullptr, DwarfLR, DwarfReg));
6379   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6380       .addCFIIndex(LRPosEntry)
6381       .setMIFlags(MachineInstr::FrameSetup);
6382 }
6383 
6384 void ARMBaseInstrInfo::restoreLRFromStack(
6385     MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {
6386   unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
6387   MachineInstrBuilder MIB = BuildMI(MBB, It, DebugLoc(), get(Opc), ARM::LR)
6388     .addReg(ARM::SP, RegState::Define)
6389     .addReg(ARM::SP);
6390   if (!Subtarget.isThumb())
6391     MIB.addReg(0);
6392   MIB.addImm(Subtarget.getStackAlignment().value()).add(predOps(ARMCC::AL));
6393 }
6394 
6395 void ARMBaseInstrInfo::emitCFIForLRRestoreFromStack(
6396     MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {
6397   // Now stack has moved back up...
6398   MachineFunction &MF = *MBB.getParent();
6399   const MCRegisterInfo *MRI = Subtarget.getRegisterInfo();
6400   unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true);
6401   int64_t StackPosEntry =
6402       MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, 0));
6403   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6404       .addCFIIndex(StackPosEntry)
6405       .setMIFlags(MachineInstr::FrameDestroy);
6406 
6407   // ... and we have restored LR.
6408   int64_t LRPosEntry =
6409       MF.addFrameInst(MCCFIInstruction::createRestore(nullptr, DwarfLR));
6410   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6411       .addCFIIndex(LRPosEntry)
6412       .setMIFlags(MachineInstr::FrameDestroy);
6413 }
6414 
6415 void ARMBaseInstrInfo::emitCFIForLRRestoreFromReg(
6416     MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {
6417   MachineFunction &MF = *MBB.getParent();
6418   const MCRegisterInfo *MRI = Subtarget.getRegisterInfo();
6419   unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true);
6420 
6421   int64_t LRPosEntry =
6422       MF.addFrameInst(MCCFIInstruction::createRestore(nullptr, DwarfLR));
6423   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6424       .addCFIIndex(LRPosEntry)
6425       .setMIFlags(MachineInstr::FrameDestroy);
6426 }
6427 
6428 void ARMBaseInstrInfo::buildOutlinedFrame(
6429     MachineBasicBlock &MBB, MachineFunction &MF,
6430     const outliner::OutlinedFunction &OF) const {
6431   // For thunk outlining, rewrite the last instruction from a call to a
6432   // tail-call.
6433   if (OF.FrameConstructionID == MachineOutlinerThunk) {
6434     MachineInstr *Call = &*--MBB.instr_end();
6435     bool isThumb = Subtarget.isThumb();
6436     unsigned FuncOp = isThumb ? 2 : 0;
6437     unsigned Opc = Call->getOperand(FuncOp).isReg()
6438                        ? isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
6439                        : isThumb ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd
6440                                                              : ARM::tTAILJMPdND
6441                                  : ARM::TAILJMPd;
6442     MachineInstrBuilder MIB = BuildMI(MBB, MBB.end(), DebugLoc(), get(Opc))
6443                                   .add(Call->getOperand(FuncOp));
6444     if (isThumb && !Call->getOperand(FuncOp).isReg())
6445       MIB.add(predOps(ARMCC::AL));
6446     Call->eraseFromParent();
6447   }
6448 
6449   // Is there a call in the outlined range?
6450   auto IsNonTailCall = [](MachineInstr &MI) {
6451     return MI.isCall() && !MI.isReturn();
6452   };
6453   if (llvm::any_of(MBB.instrs(), IsNonTailCall)) {
6454     MachineBasicBlock::iterator It = MBB.begin();
6455     MachineBasicBlock::iterator Et = MBB.end();
6456 
6457     if (OF.FrameConstructionID == MachineOutlinerTailCall ||
6458         OF.FrameConstructionID == MachineOutlinerThunk)
6459       Et = std::prev(MBB.end());
6460 
6461     // We have to save and restore LR, we need to add it to the liveins if it
6462     // is not already part of the set.  This is suffient since outlined
6463     // functions only have one block.
6464     if (!MBB.isLiveIn(ARM::LR))
6465       MBB.addLiveIn(ARM::LR);
6466 
6467     // Insert a save before the outlined region
6468     saveLROnStack(MBB, It);
6469     emitCFIForLRSaveOnStack(MBB, It);
6470 
6471     // Fix up the instructions in the range, since we're going to modify the
6472     // stack.
6473     assert(OF.FrameConstructionID != MachineOutlinerDefault &&
6474            "Can only fix up stack references once");
6475     fixupPostOutline(MBB);
6476 
6477     // Insert a restore before the terminator for the function.  Restore LR.
6478     restoreLRFromStack(MBB, Et);
6479     emitCFIForLRRestoreFromStack(MBB, Et);
6480   }
6481 
6482   // If this is a tail call outlined function, then there's already a return.
6483   if (OF.FrameConstructionID == MachineOutlinerTailCall ||
6484       OF.FrameConstructionID == MachineOutlinerThunk)
6485     return;
6486 
6487   // Here we have to insert the return ourselves.  Get the correct opcode from
6488   // current feature set.
6489   BuildMI(MBB, MBB.end(), DebugLoc(), get(Subtarget.getReturnOpcode()))
6490       .add(predOps(ARMCC::AL));
6491 
6492   // Did we have to modify the stack by saving the link register?
6493   if (OF.FrameConstructionID != MachineOutlinerDefault &&
6494       OF.Candidates[0].CallConstructionID != MachineOutlinerDefault)
6495     return;
6496 
6497   // We modified the stack.
6498   // Walk over the basic block and fix up all the stack accesses.
6499   fixupPostOutline(MBB);
6500 }
6501 
6502 MachineBasicBlock::iterator ARMBaseInstrInfo::insertOutlinedCall(
6503     Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
6504     MachineFunction &MF, const outliner::Candidate &C) const {
6505   MachineInstrBuilder MIB;
6506   MachineBasicBlock::iterator CallPt;
6507   unsigned Opc;
6508   bool isThumb = Subtarget.isThumb();
6509 
6510   // Are we tail calling?
6511   if (C.CallConstructionID == MachineOutlinerTailCall) {
6512     // If yes, then we can just branch to the label.
6513     Opc = isThumb
6514               ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6515               : ARM::TAILJMPd;
6516     MIB = BuildMI(MF, DebugLoc(), get(Opc))
6517               .addGlobalAddress(M.getNamedValue(MF.getName()));
6518     if (isThumb)
6519       MIB.add(predOps(ARMCC::AL));
6520     It = MBB.insert(It, MIB);
6521     return It;
6522   }
6523 
6524   // Create the call instruction.
6525   Opc = isThumb ? ARM::tBL : ARM::BL;
6526   MachineInstrBuilder CallMIB = BuildMI(MF, DebugLoc(), get(Opc));
6527   if (isThumb)
6528     CallMIB.add(predOps(ARMCC::AL));
6529   CallMIB.addGlobalAddress(M.getNamedValue(MF.getName()));
6530 
6531   if (C.CallConstructionID == MachineOutlinerNoLRSave ||
6532       C.CallConstructionID == MachineOutlinerThunk) {
6533     // No, so just insert the call.
6534     It = MBB.insert(It, CallMIB);
6535     return It;
6536   }
6537 
6538   const ARMFunctionInfo &AFI = *C.getMF()->getInfo<ARMFunctionInfo>();
6539   // Can we save to a register?
6540   if (C.CallConstructionID == MachineOutlinerRegSave) {
6541     unsigned Reg = findRegisterToSaveLRTo(C);
6542     assert(Reg != 0 && "No callee-saved register available?");
6543 
6544     // Save and restore LR from that register.
6545     copyPhysReg(MBB, It, DebugLoc(), Reg, ARM::LR, true);
6546     if (!AFI.isLRSpilled())
6547       emitCFIForLRSaveToReg(MBB, It, Reg);
6548     CallPt = MBB.insert(It, CallMIB);
6549     copyPhysReg(MBB, It, DebugLoc(), ARM::LR, Reg, true);
6550     if (!AFI.isLRSpilled())
6551       emitCFIForLRRestoreFromReg(MBB, It);
6552     It--;
6553     return CallPt;
6554   }
6555   // We have the default case. Save and restore from SP.
6556   if (!MBB.isLiveIn(ARM::LR))
6557     MBB.addLiveIn(ARM::LR);
6558   saveLROnStack(MBB, It);
6559   if (!AFI.isLRSpilled())
6560     emitCFIForLRSaveOnStack(MBB, It);
6561   CallPt = MBB.insert(It, CallMIB);
6562   restoreLRFromStack(MBB, It);
6563   if (!AFI.isLRSpilled())
6564     emitCFIForLRRestoreFromStack(MBB, It);
6565   It--;
6566   return CallPt;
6567 }
6568 
6569 bool ARMBaseInstrInfo::shouldOutlineFromFunctionByDefault(
6570     MachineFunction &MF) const {
6571   return Subtarget.isMClass() && MF.getFunction().hasMinSize();
6572 }
6573 
6574 bool ARMBaseInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
6575                                                          AAResults *AA) const {
6576   // Try hard to rematerialize any VCTPs because if we spill P0, it will block
6577   // the tail predication conversion. This means that the element count
6578   // register has to be live for longer, but that has to be better than
6579   // spill/restore and VPT predication.
6580   return isVCTP(&MI) && !isPredicated(MI);
6581 }
6582 
6583 unsigned llvm::getBLXOpcode(const MachineFunction &MF) {
6584   return (MF.getSubtarget<ARMSubtarget>().hardenSlsBlr()) ? ARM::BLX_noip
6585                                                           : ARM::BLX;
6586 }
6587 
6588 unsigned llvm::gettBLXrOpcode(const MachineFunction &MF) {
6589   return (MF.getSubtarget<ARMSubtarget>().hardenSlsBlr()) ? ARM::tBLXr_noip
6590                                                           : ARM::tBLXr;
6591 }
6592 
6593 unsigned llvm::getBLXpredOpcode(const MachineFunction &MF) {
6594   return (MF.getSubtarget<ARMSubtarget>().hardenSlsBlr()) ? ARM::BLX_pred_noip
6595                                                           : ARM::BLX_pred;
6596 }
6597 
6598