//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
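// The peepholes performed here include (the assembly below is schematic;
// register names are illustrative):
//  * pairing two adjacent loads or stores, e.g.
//      ldr w0, [x2]
//      ldr w1, [x2, #4]
//    becomes
//      ldp w0, w1, [x2]
//  * folding a base register add/sub into a pre-/post-indexed load/store
//    with writeback,
//  * promoting a load that reads from a preceding store to the same address,
//  * merging adjacent narrow zero stores into a single wider zero store.
//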
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pair instructions generated from unscaled ops");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

// The LdStLimit limits how far we search for load/store pairs.
static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

// The UpdateLimit limits how far we search for update instructions when we
// form pre-/post-index instructions.
static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
                                     cl::Hidden);

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward = false;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx = -1;

  LdStPairFlags() = default;

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }
};

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;

  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which registers have been modified and used.
  BitVector ModifiedRegs, UsedRegs;

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit,
                                               bool FindNarrowMerge);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two narrow zero stores indicated into a single wider store
  // instruction.
  MachineBasicBlock::iterator
  mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                        MachineBasicBlock::iterator MergeMI,
                        const LdStPairFlags &Flags);

  // Merge the two instructions indicated into a single pair-wise instruction.
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote the load that reads directly from the address stored to.
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset, unsigned Limit);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge zero store instructions.
  bool tryToMergeZeroStInst(MachineBasicBlock::iterator &MBBI);

  // Find and pair ldr/str instructions.
  bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  bool optimizeBlock(MachineBasicBlock &MBB, bool EnableNarrowZeroStOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

  StringRef getPassName() const override { return AARCH64_LOAD_STORE_OPT_NAME; }
};

char AArch64LoadStoreOpt::ID = 0;

} // end anonymous namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}

// Scaling factor for unscaled load or store.
static int getMemScale(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown scale!");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
    return 1;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return 2;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::STPSi:
  case AArch64::STPWi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDPDi:
  case AArch64::LDPXi:
  case AArch64::STPDi:
  case AArch64::STPXi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
    return 16;
  }
}

static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return std::numeric_limits<unsigned>::max();
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  }
}

static unsigned getMatchingWideOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no wide equivalent!");
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::STURWi:
    return AArch64::STURXi;
  case AArch64::STRWui:
    return AArch64::STRXui;
  }
}

static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  }
}

static unsigned isMatchingStore(MachineInstr &LoadInst,
                                MachineInstr &StoreInst) {
  unsigned LdOpc = LoadInst.getOpcode();
  unsigned StOpc = StoreInst.getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}

static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  }
}

static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpost;
  case AArch64::STRDui:
    return AArch64::STRDpost;
  case AArch64::STRQui:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
    return AArch64::STRWpost;
  case AArch64::STRXui:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  }
}

static bool isPairedLdSt(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    return true;
  }
}

static const MachineOperand &getLdStRegOp(const MachineInstr &MI,
                                          unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI.getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr &MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI.getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI.getOperand(Idx);
}

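// Check that the bytes the load reads are fully contained in the bytes the
// store writes. For example (offsets shown in bytes): an 8-byte store at
// offset 8 covers bytes [8, 16); a 2-byte load at offset 12 reads bytes
// [12, 14), which is contained, so the load can be satisfied from the store.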
static bool isLdOffsetInRangeOfSt(MachineInstr &LoadInst,
                                  MachineInstr &StoreInst,
                                  const AArch64InstrInfo *TII) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = getMemScale(LoadInst);
  int StoreSize = getMemScale(StoreInst);
  int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
                             ? getLdStOffsetOp(StoreInst).getImm()
                             : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}

static bool isPromotableZeroStoreInst(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AArch64::STRWui || Opc == AArch64::STURWi ||
          isNarrowStore(Opc)) &&
         getLdStRegOp(MI).getReg() == AArch64::WZR;
}

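// Merge two adjacent narrow zero stores into a single wider zero store, e.g.
// (schematic):
//   strh wzr, [x0]
//   strh wzr, [x0, #2]
// becomes
//   str wzr, [x0]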
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                                           MachineBasicBlock::iterator MergeMI,
                                           const LdStPairFlags &Flags) {
  assert(isPromotableZeroStoreInst(*I) && isPromotableZeroStoreInst(*MergeMI) &&
         "Expected promotable zero stores.");

  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way, merging will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == MergeMI)
    ++NextI;

  unsigned Opc = I->getOpcode();
  bool IsScaled = !TII->isUnscaledLdSt(Opc);
  int OffsetStride = IsScaled ? 1 : getMemScale(*I);

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? MergeMI : I;
  // MergeForward also determines from which instruction we copy the base
  // register operand, so that we get flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(*MergeMI) : getLdStBaseOp(*I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI;
  if (getLdStOffsetOp(*I).getImm() ==
      getLdStOffsetOp(*MergeMI).getImm() + OffsetStride)
    RtMI = &*MergeMI;
  else
    RtMI = &*I;

  int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
  // Rescale the offset from the narrow store's scale to the wider store's.
  if (IsScaled) {
    assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
    OffsetImm /= 2;
  }

  // Construct the new instruction.
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MachineInstrBuilder MIB;
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
            .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
            .add(BaseRegOp)
            .addImm(OffsetImm)
            .setMemRefs(I->mergeMemRefsWith(*MergeMI));
  (void)MIB;

  DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(MergeMI->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  I->eraseFromParent();
  MergeMI->eraseFromParent();
  return NextI;
}

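// Combine two adjacent loads or stores into a single paired instruction, e.g.
// (schematic):
//   ldr x0, [x2]
//   ldr x1, [x2, #8]
// becomes
//   ldp x0, x1, [x2]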
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way, merging will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = TII->isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? getMemScale(*I) : 1;

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // MergeForward also determines from which instruction we copy the base
  // register operand, so that we get flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(*Paired) : getLdStBaseOp(*I);

  int Offset = getLdStOffsetOp(*I).getImm();
  int PairedOffset = getLdStOffsetOp(*Paired).getImm();
  bool PairedIsUnscaled = TII->isUnscaledLdSt(Paired->getOpcode());
  if (IsUnscaled != PairedIsUnscaled) {
    // We're trying to pair instructions that differ in how they are scaled.  If
    // I is scaled then scale the offset of Paired accordingly.  Otherwise, do
    // the opposite (i.e., make Paired's offset unscaled).
    int MemSize = getMemScale(*Paired);
    if (PairedIsUnscaled) {
      // If the unscaled offset isn't a multiple of the MemSize, we can't
      // pair the operations together.
      assert(!(PairedOffset % getMemScale(*Paired)) &&
             "Offset should be a multiple of the stride!");
      PairedOffset /= MemSize;
    } else {
      PairedOffset *= MemSize;
    }
  }

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (Offset == PairedOffset + OffsetStride) {
    RtMI = &*Paired;
    Rt2MI = &*I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = &*I;
    Rt2MI = &*Paired;
  }
  int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
  // Scale the immediate offset, if necessary.
  if (TII->isUnscaledLdSt(RtMI->getOpcode())) {
    assert(!(OffsetImm % getMemScale(*RtMI)) &&
           "Unscaled offset cannot be scaled.");
    OffsetImm /= getMemScale(*RtMI);
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MachineOperand RegOp0 = getLdStRegOp(*RtMI);
  MachineOperand RegOp1 = getLdStRegOp(*Rt2MI);
  // Kill flags may become invalid when moving stores for pairing.
  if (RegOp0.isUse()) {
    if (!MergeForward) {
      // Clear kill flags on store if moving upwards. Example:
      //   STRWui %w0, ...
      //   USE %w1
      //   STRWui kill %w1  ; need to clear kill flag when moving STRWui upwards
      RegOp0.setIsKill(false);
      RegOp1.setIsKill(false);
    } else {
      // Clear kill flags of the first store's register. Example:
      //   STRWui %w1, ...
      //   USE kill %w1   ; need to clear kill flag when moving STRWui downwards
      //   STRW %w0
      unsigned Reg = getLdStRegOp(*I).getReg();
      for (MachineInstr &MI : make_range(std::next(I), Paired))
        MI.clearRegisterKills(Reg, TRI);
    }
  }
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingPairOpcode(Opc)))
            .add(RegOp0)
            .add(RegOp1)
            .add(BaseRegOp)
            .addImm(OffsetImm)
            .setMemRefs(I->mergeMemRefsWith(*Paired));

  (void)MIB;

  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %W1<def> = KILL %W1, %X1<imp-def>
    // %X1<def> = SBFMXri %X1<kill>, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // extended opcode.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    DEBUG(dbgs() << "  Extend operand:\n    ");
    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
  } else {
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  }
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}

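// Forward the stored value to a load that reads it back, e.g. (schematic):
//   str w1, [x0, #4]
//   ldr w2, [x0, #4]
// becomes
//   str w1, [x0, #4]
//   mov w2, w1
// A narrower load is instead replaced with a bitfield extract (AND/UBFM) of
// the stored register; this is done for little-endian targets only.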
MachineBasicBlock::iterator
AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                                          MachineBasicBlock::iterator StoreI) {
  MachineBasicBlock::iterator NextI = LoadI;
  ++NextI;

  int LoadSize = getMemScale(*LoadI);
  int StoreSize = getMemScale(*StoreI);
  unsigned LdRt = getLdStRegOp(*LoadI).getReg();
  unsigned StRt = getLdStRegOp(*StoreI).getReg();
  bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);

  assert((IsStoreXReg ||
          TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
         "Unexpected RegClass");

  MachineInstr *BitExtMI;
  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load if the destination register of the load is the same
    // register as the stored value.
    if (StRt == LdRt && LoadSize == 8) {
      StoreI->clearRegisterKills(StRt, TRI);
      DEBUG(dbgs() << "Remove load instruction:\n    ");
      DEBUG(LoadI->print(dbgs()));
      DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store have the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .addReg(StRt)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
  } else {
    // FIXME: Currently we disable this transformation in big-endian targets as
    // performance and correctness are verified only in little-endian.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = TII->isUnscaledLdSt(*LoadI);
    assert(IsUnscaled == TII->isUnscaledLdSt(*StoreI) &&
           "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset = IsUnscaled
                               ? getLdStOffsetOp(*LoadI).getImm()
                               : getLdStOffsetOp(*LoadI).getImm() * LoadSize;
    int UnscaledStOffset = IsUnscaled
                               ? getLdStOffsetOp(*StoreI).getImm()
                               : getLdStOffsetOp(*StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    unsigned DestReg = IsStoreXReg
                           ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
                                                      &AArch64::GPR64RegClass)
                           : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    Imms = Immr + Width - 1;
    if (UnscaledLdOffset == UnscaledStOffset) {
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) | // N
                                (Immr << 6) |                   // immr
                                (Imms << 0);                    // imms

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .addReg(StRt)
              .addImm(AndMaskEncoded);
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .addReg(StRt)
              .addImm(Immr)
              .addImm(Imms);
    }
  }
  StoreI->clearRegisterKills(StRt, TRI);

  (void)BitExtMI;

  DEBUG(dbgs() << "Promoting load by replacing :\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(LoadI->print(dbgs()));
  DEBUG(dbgs() << "  with instructions:\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG((BitExtMI)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old load instruction; the store is kept.
  LoadI->eraseFromParent();
  return NextI;
}

/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
static void trackRegDefsUses(const MachineInstr &MI, BitVector &ModifiedRegs,
                             BitVector &UsedRegs,
                             const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isRegMask())
      ModifiedRegs.setBitsNotInMask(MO.getRegMask());

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (MO.isDef()) {
      // WZR/XZR are not modified even when used as a destination register.
      if (Reg != AArch64::WZR && Reg != AArch64::XZR)
        for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
          ModifiedRegs.set(*AI);
    } else {
      assert(MO.isUse() && "Reg operand not a def and not a use?!?");
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        UsedRegs.set(*AI);
    }
  }
}

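// Check whether the scaled "element" offset fits the 7-bit signed immediate
// of a paired instruction, i.e. [-64, 63] elements. For 8-byte elements that
// corresponds to byte offsets in [-512, 504].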
static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled) {
    // If the byte-offset isn't a multiple of the stride, there's no point
    // trying to match it.
    if (Offset % OffsetStride)
      return false;
    Offset /= OffsetStride;
  }
  return Offset <= 63 && Offset >= -64;
}

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
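// For example, alignTo(5, 4) == 8 and alignTo(-3, 4) == 0.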
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}

static bool mayAlias(MachineInstr &MIa, MachineInstr &MIb,
                     const AArch64InstrInfo *TII) {
  // One of the instructions must modify memory.
  if (!MIa.mayStore() && !MIb.mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa.mayLoadOrStore() && !MIb.mayLoadOrStore())
    return false;

  return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
}

static bool mayAlias(MachineInstr &MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     const AArch64InstrInfo *TII) {
  for (MachineInstr *MIb : MemInsns)
    if (mayAlias(MIa, *MIb, TII))
      return true;

  return false;
}

bool AArch64LoadStoreOpt::findMatchingStore(
    MachineBasicBlock::iterator I, unsigned Limit,
    MachineBasicBlock::iterator &StoreI) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr &LoadMI = *I;
  unsigned BaseReg = getLdStBaseOp(LoadMI).getReg();

  // If the load is the first instruction in the block, there's obviously
  // not any matching store.
  if (MBBI == B)
    return false;

  // Track which registers have been modified and used between the first insn
  // and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  unsigned Count = 0;
  do {
    --MBBI;
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If the load instruction reads directly from the address to which the
    // store instruction writes and the stored value is not modified, we can
    // promote the load. Since we do not handle stores with pre-/post-index,
    // it's unnecessary to check if BaseReg is modified by the store itself.
    if (MI.mayStore() && isMatchingStore(LoadMI, MI) &&
        BaseReg == getLdStBaseOp(MI).getReg() &&
        isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
        !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
      StoreI = MBBI;
      return true;
    }

    if (MI.isCall())
      return false;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return false;

    // If we encounter a store aliased with the load, return early.
    if (MI.mayStore() && mayAlias(LoadMI, MI, TII))
      return false;
  } while (MBBI != B && Count < Limit);
  return false;
}

// Returns true if FirstMI and MI are candidates for merging or pairing.
// Otherwise, returns false.
static bool areCandidatesToMergeOrPair(MachineInstr &FirstMI, MachineInstr &MI,
                                       LdStPairFlags &Flags,
                                       const AArch64InstrInfo *TII) {
  // If this is volatile or if pairing is suppressed, not a candidate.
  if (MI.hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
    return false;

  // We should have already checked FirstMI for pair suppression and volatility.
  assert(!FirstMI.hasOrderedMemoryRef() &&
         !TII->isLdStPairSuppressed(FirstMI) &&
         "FirstMI shouldn't get here if either of these checks are true.");

  unsigned OpcA = FirstMI.getOpcode();
  unsigned OpcB = MI.getOpcode();

  // Opcodes match: nothing more to check.
  if (OpcA == OpcB)
    return true;

  // Try to match a sign-extended load/store with a zero-extended load/store.
  bool IsValidLdStrOpc, PairIsValidLdStrOpc;
  unsigned NonSExtOpc = getMatchingNonSExtOpcode(OpcA, &IsValidLdStrOpc);
  assert(IsValidLdStrOpc &&
         "Given Opc should be a Load or Store with an immediate");
  // OpcA will be the first instruction in the pair.
  if (NonSExtOpc == getMatchingNonSExtOpcode(OpcB, &PairIsValidLdStrOpc)) {
    Flags.setSExtIdx(NonSExtOpc == (unsigned)OpcA ? 1 : 0);
    return true;
  }

  // If the second instruction isn't even a mergable/pairable load/store, bail
  // out.
  if (!PairIsValidLdStrOpc)
    return false;

  // FIXME: We don't support merging narrow stores with mixed scaled/unscaled
  // offsets.
  if (isNarrowStore(OpcA) || isNarrowStore(OpcB))
    return false;

  // Try to match an unscaled load/store with a scaled load/store.
  return TII->isUnscaledLdSt(OpcA) != TII->isUnscaledLdSt(OpcB) &&
         getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);

  // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
}

/// Scan the instructions looking for a load/store that can be combined with the
/// current instruction into a wider equivalent or a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                      LdStPairFlags &Flags, unsigned Limit,
                                      bool FindNarrowMerge) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr &FirstMI = *I;
  ++MBBI;

  bool MayLoad = FirstMI.mayLoad();
  bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
  unsigned Reg = getLdStRegOp(FirstMI).getReg();
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
  int Offset = getLdStOffsetOp(FirstMI).getImm();
  int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
  bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI);

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  // Remember any instructions that read/write memory between FirstMI and MI.
  SmallVector<MachineInstr *, 4> MemInsns;

  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    Flags.setSExtIdx(-1);
    if (areCandidatesToMergeOrPair(FirstMI, MI, Flags, TII) &&
        getLdStOffsetOp(MI).isImm()) {
      assert(MI.mayLoadOrStore() && "Expected memory operation.");
      // If we've found another instruction with the same opcode, check to see
      // if the base and offset are compatible with our starting instruction.
      // These instructions all have scaled immediate operands, so we just
      // check for +1/-1. Make sure to check the new instruction offset is
      // actually an immediate and not a symbolic reference destined for
      // a relocation.
      unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
      int MIOffset = getLdStOffsetOp(MI).getImm();
      bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
      if (IsUnscaled != MIIsUnscaled) {
        // We're trying to pair instructions that differ in how they are scaled.
        // If FirstMI is scaled then scale the offset of MI accordingly.
        // Otherwise, do the opposite (i.e., make MI's offset unscaled).
        int MemSize = getMemScale(MI);
        if (MIIsUnscaled) {
          // If the unscaled offset isn't a multiple of the MemSize, we can't
          // pair the operations together: bail and keep looking.
          if (MIOffset % MemSize) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
          MIOffset /= MemSize;
        } else {
          MIOffset *= MemSize;
        }
      }

      if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
                                   (Offset + OffsetStride == MIOffset))) {
        int MinOffset = Offset < MIOffset ? Offset : MIOffset;
        if (FindNarrowMerge) {
          // If the alignment requirements of the scaled wide load/store
          // instruction can't express the offset of the scaled narrow input,
          // bail and keep looking. For promotable zero stores, allow only when
          // the stored value is the same (i.e., WZR).
          if ((!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) ||
              (IsPromotableZeroStore && Reg != getLdStRegOp(MI).getReg())) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
        } else {
          // Pairwise instructions have a 7-bit signed offset field. Single
          // insns have a 12-bit unsigned offset field.  If the resultant
          // immediate offset of merging these instructions is out of range for
          // a pairwise instruction, bail and keep looking.
          if (!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
          // If the alignment requirements of the paired (scaled) instruction
          // can't express the offset of the unscaled input, bail and keep
          // looking.
          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
        }
        // If the destination register of the loads is the same register, bail
        // and keep looking. A load-pair instruction with both destination
        // registers the same is UNPREDICTABLE and will result in an exception.
        if (MayLoad && Reg == getLdStRegOp(MI).getReg()) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(&MI);
          continue;
        }

        // If the Rt of the second instruction was not modified or used between
        // the two instructions and none of the instructions between the second
        // and first alias with the second, we can combine the second into the
        // first.
        if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
            !(MI.mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
            !mayAlias(MI, MemInsns, TII)) {
          Flags.setMergeForward(false);
          return MBBI;
        }

        // Likewise, if the Rt of the first instruction is not modified or used
        // between the two instructions and none of the instructions between the
        // first and the second alias with the first, we can combine the first
        // into the second.
        if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
            !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
            !mayAlias(FirstMI, MemInsns, TII)) {
          Flags.setMergeForward(true);
          return MBBI;
        }
        // Unable to combine these instructions due to interference in between.
        // Keep looking.
      }
    }

    // The instruction wasn't a matching load or store. Stop searching if we
    // encounter a call instruction that might modify memory.
    if (MI.isCall())
      return E;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return E;

    // Update list of instructions that read/write memory.
    if (MI.mayLoadOrStore())
      MemInsns.push_back(&MI);
  }
  return E;
}

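// Fold a base register update into the load/store, producing a writeback
// form, e.g. (schematic):
//   ldr x0, [x2]
//   add x2, x2, #8
// becomes the post-indexed
//   ldr x0, [x2], #8
// while
//   add x2, x2, #8
//   ldr x0, [x2]
// becomes the pre-indexed
//   ldr x0, [x2, #8]!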
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator Update,
                                     bool IsPreIdx) {
  assert((Update->getOpcode() == AArch64::ADDXri ||
          Update->getOpcode() == AArch64::SUBXri) &&
         "Unexpected base register update instruction to merge!");
  MachineBasicBlock::iterator NextI = I;
  // Return the instruction following the merged instruction, which is
  // the instruction following our unmerged load. Unless that's the add/sub
  // instruction we're merging, in which case it's the one after that.
  if (++NextI == Update)
    ++NextI;

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;

  unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                             : getPostIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB;
  if (!isPairedLdSt(*I)) {
    // Non-paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .add(getLdStRegOp(*Update))
              .add(getLdStRegOp(*I))
              .add(getLdStBaseOp(*I))
              .addImm(Value)
              .setMemRefs(I->memoperands_begin(), I->memoperands_end());
  } else {
    // Paired instruction.
    int Scale = getMemScale(*I);
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .add(getLdStRegOp(*Update))
              .add(getLdStRegOp(*I, 0))
              .add(getLdStRegOp(*I, 1))
              .add(getLdStBaseOp(*I))
              .addImm(Value / Scale)
              .setMemRefs(I->memoperands_begin(), I->memoperands_end());
  }
  (void)MIB;

  if (IsPreIdx)
    DEBUG(dbgs() << "Creating pre-indexed load/store.");
  else
    DEBUG(dbgs() << "Creating post-indexed load/store.");
  DEBUG(dbgs() << "    Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Update->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}

bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
                                               MachineInstr &MI,
                                               unsigned BaseReg, int Offset) {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::SUBXri:
  case AArch64::ADDXri:
    // Make sure it's a vanilla immediate operand, not a relocation or
    // anything else we can't handle.
    if (!MI.getOperand(2).isImm())
      break;
    // Watch out for 1 << 12 shifted value.
    if (AArch64_AM::getShiftValue(MI.getOperand(3).getImm()))
      break;

    // The update instruction source and destination register must be the
    // same as the load/store base register.
    if (MI.getOperand(0).getReg() != BaseReg ||
        MI.getOperand(1).getReg() != BaseReg)
      break;

    bool IsPairedInsn = isPairedLdSt(MemMI);
    int UpdateOffset = MI.getOperand(2).getImm();
    if (MI.getOpcode() == AArch64::SUBXri)
      UpdateOffset = -UpdateOffset;

    // For non-paired load/store instructions, the immediate must fit in a
    // signed 9-bit integer.
    if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
      break;

    // For paired load/store instructions, the immediate must be a multiple of
    // the scaling factor.  The scaled offset must also fit into a signed 7-bit
    // integer.
    if (IsPairedInsn) {
      int Scale = getMemScale(MemMI);
      if (UpdateOffset % Scale != 0)
        break;

      int ScaledOffset = UpdateOffset / Scale;
      if (ScaledOffset > 63 || ScaledOffset < -64)
        break;
    }

    // If we have a non-zero Offset, we check that it matches the amount
    // we're adding to the register.
    if (!Offset || Offset == UpdateOffset)
      return true;
    break;
  }
  return false;
}

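// Scan forward from the load/store for an add/sub of its base register that
// can be folded in as a post-index writeback, provided no instruction in
// between modifies or uses the base register.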
1338 MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
1339     MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
1340   MachineBasicBlock::iterator E = I->getParent()->end();
1341   MachineInstr &MemMI = *I;
1342   MachineBasicBlock::iterator MBBI = I;
1343 
1344   unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
1345   int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);
1346 
1347   // Scan forward looking for post-index opportunities.  Updating instructions
1348   // can't be formed if the memory instruction doesn't have the offset we're
1349   // looking for.
1350   if (MIUnscaledOffset != UnscaledOffset)
1351     return E;
1352 
1353   // If the base register overlaps a destination register, we can't
1354   // merge the update.
1355   bool IsPairedInsn = isPairedLdSt(MemMI);
1356   for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1357     unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1358     if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1359       return E;
1360   }
1361 
1362   // Track which registers have been modified and used between the first insn
1363   // (inclusive) and the second insn.
1364   ModifiedRegs.reset();
1365   UsedRegs.reset();
1366   ++MBBI;
1367   for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
1368     MachineInstr &MI = *MBBI;
1369 
1370     // Don't count transient instructions towards the search limit since there
1371     // may be different numbers of them if e.g. debug information is present.
1372     if (!MI.isTransient())
1373       ++Count;
1374 
1375     // If we found a match, return it.
1376     if (isMatchingUpdateInsn(*I, MI, BaseReg, UnscaledOffset))
1377       return MBBI;
1378 
1379     // Update the status of what the instruction clobbered and used.
1380     trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1381 
1382     // Otherwise, if the base register is used or modified, we have no match, so
1383     // return early.
1384     if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1385       return E;
1386   }
1387   return E;
1388 }
1389 
1390 MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
1391     MachineBasicBlock::iterator I, unsigned Limit) {
1392   MachineBasicBlock::iterator B = I->getParent()->begin();
1393   MachineBasicBlock::iterator E = I->getParent()->end();
1394   MachineInstr &MemMI = *I;
1395   MachineBasicBlock::iterator MBBI = I;
1396 
1397   unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
1398   int Offset = getLdStOffsetOp(MemMI).getImm();
1399 
1400   // If the load/store is the first instruction in the block, there's obviously
1401   // not any matching update. Ditto if the memory offset isn't zero.
1402   if (MBBI == B || Offset != 0)
1403     return E;
1404   // If the base register overlaps a destination register, we can't
1405   // merge the update.
1406   bool IsPairedInsn = isPairedLdSt(MemMI);
1407   for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1408     unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1409     if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1410       return E;
1411   }

  // Track which registers have been modified and used between any candidate
  // update instruction and the memory instruction, walking backward from the
  // memory instruction.
  ModifiedRegs.reset();
  UsedRegs.reset();
  unsigned Count = 0;
  do {
    --MBBI;
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(*I, MI, BaseReg, Offset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  } while (MBBI != B && Count < Limit);
  return E;
}
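
// Find a load that reads directly from an earlier store to the same address
// and promote it: the load becomes a register move, or a bitfield extract if
// the store is wider than the load. A sketch, mirroring the summary in
// optimizeBlock:
//   str w1, [x0, #4]
//   ldrh w2, [x0, #6]
//   ; becomes
//   str w1, [x0, #4]
//   lsr w2, w1, #16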
bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  // If this is a volatile or otherwise ordered load, don't mess with it.
  if (MI.hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm.
  // FIXME: It is possible to extend it to handle reg+reg cases.
  if (!getLdStOffsetOp(MI).isImm())
    return false;

  // Look backward up to LdStLimit instructions.
  MachineBasicBlock::iterator StoreI;
  if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
    ++NumLoadsFromStoresPromoted;
    // Promote the load. Keeping the iterator straight is a pain, so we let
    // the merge routine tell us what the next instruction is after it's done
    // mucking about.
    MBBI = promoteLoadFromStore(MBBI, StoreI);
    return true;
  }
  return false;
}

// Merge adjacent zero stores into a wider store.
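// For example (as also summarized in optimizeBlock):
//   strh wzr, [x0]
//   strh wzr, [x0, #2]
//   ; becomes
//   str wzr, [x0]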
bool AArch64LoadStoreOpt::tryToMergeZeroStInst(
    MachineBasicBlock::iterator &MBBI) {
  assert(isPromotableZeroStoreInst(*MBBI) &&
         "Expected a promotable zero store.");
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();

  if (!TII->isCandidateToMergeOrPair(MI))
    return false;

  // Look ahead up to LdStLimit instructions for a mergeable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator MergeMI =
      findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ true);
  if (MergeMI != E) {
    ++NumZeroStoresPromoted;

    // Keeping the iterator straight is a pain, so we let the merge routine
    // tell us what the next instruction is after it's done mucking about.
    MBBI = mergeNarrowZeroStores(MBBI, MergeMI, Flags);
    return true;
  }
  return false;
}

// Find loads and stores that can be merged into a single load or store pair
// instruction.
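// For example (as also summarized in optimizeBlock):
//   ldr x0, [x2]
//   ldr x1, [x2, #8]
//   ; becomes
//   ldp x0, x1, [x2]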
bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();

  if (!TII->isCandidateToMergeOrPair(MI))
    return false;

  // Early exit if the offset can't possibly be in range for a pair: the
  // paired immediate has 6 bits of positive range, and we allow one extra
  // stride in case a later insn matches at Offset - OffsetStride and becomes
  // the base of the pair.
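  // To make the bound concrete (ISA background, stated here as an aside):
  // LDP/STP encode a 7-bit signed immediate scaled by the access size, i.e.
  // scaled offsets in [-64, 63]. So a scaled 64-bit load at offset 64 can
  // still pair if a later insn is found at offset 63, which then anchors the
  // pair at an encodable offset.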
  bool IsUnscaled = TII->isUnscaledLdSt(MI);
  int Offset = getLdStOffsetOp(MI).getImm();
  int OffsetStride = IsUnscaled ? getMemScale(MI) : 1;
  // Allow one more for offset.
  if (Offset > 0)
    Offset -= OffsetStride;
  if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
    return false;

  // Look ahead up to LdStLimit instructions for a pairable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator Paired =
      findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false);
  if (Paired != E) {
    ++NumPairCreated;
    if (IsUnscaled)
      ++NumUnscaledPairCreated;
    // Keeping the iterator straight is a pain, so we let the merge routine
    // tell us what the next instruction is after it's done mucking about.
    MBBI = mergePairedInsns(MBBI, Paired, Flags);
    return true;
  }
  return false;
}

bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                        bool EnableNarrowZeroStOpt) {
  bool Modified = false;
  // Four transformations to do here:
  // 1) Find loads that directly read from stores and promote them by
  //    replacing with mov instructions. If the store is wider than the load,
  //    the load will be replaced with a bitfield extract.
  //      e.g.,
  //        str w1, [x0, #4]
  //        ldrh w2, [x0, #6]
  //        ; becomes
  //        str w1, [x0, #4]
  //        lsr w2, w1, #16
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr &MI = *MBBI;
    switch (MI.getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::LDRBBui:
    case AArch64::LDRHHui:
    case AArch64::LDRWui:
    case AArch64::LDRXui:
    // Unscaled instructions.
    case AArch64::LDURBBi:
    case AArch64::LDURHHi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
      if (tryToPromoteLoadFromStore(MBBI)) {
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
  }
  // 2) Merge adjacent zero stores into a wider store.
  //      e.g.,
  //        strh wzr, [x0]
  //        strh wzr, [x0, #2]
  //        ; becomes
  //        str wzr, [x0]
  //      e.g.,
  //        str wzr, [x0]
  //        str wzr, [x0, #4]
  //        ; becomes
  //        str xzr, [x0]
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       EnableNarrowZeroStOpt && MBBI != E;) {
    if (isPromotableZeroStoreInst(*MBBI)) {
      if (tryToMergeZeroStInst(MBBI))
        Modified = true;
      else
        ++MBBI;
    } else
      ++MBBI;
  }

  // 3) Find loads and stores that can be merged into a single load or store
  //    pair instruction.
  //      e.g.,
  //        ldr x0, [x2]
  //        ldr x1, [x2, #8]
  //        ; becomes
  //        ldp x0, x1, [x2]
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    if (TII->isPairableLdStInst(*MBBI) && tryToPairLdStInst(MBBI))
      Modified = true;
    else
      ++MBBI;
  }
  // 4) Find base register updates that can be merged into the load or store
  //    as a base-reg writeback.
  //      e.g.,
  //        ldr x0, [x2]
  //        add x2, x2, #4
  //        ; becomes
  //        ldr x0, [x2], #4
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr &MI = *MBBI;
    // Do update merging. It's simpler to keep this separate from the switch
    // above, though not strictly necessary.
    unsigned Opc = MI.getOpcode();
    switch (Opc) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::STRSui:
    case AArch64::STRDui:
    case AArch64::STRQui:
    case AArch64::STRXui:
    case AArch64::STRWui:
    case AArch64::STRHHui:
    case AArch64::STRBBui:
    case AArch64::LDRSui:
    case AArch64::LDRDui:
    case AArch64::LDRQui:
    case AArch64::LDRXui:
    case AArch64::LDRWui:
    case AArch64::LDRHHui:
    case AArch64::LDRBBui:
    // Unscaled instructions.
    case AArch64::STURSi:
    case AArch64::STURDi:
    case AArch64::STURQi:
    case AArch64::STURWi:
    case AArch64::STURXi:
    case AArch64::LDURSi:
    case AArch64::LDURDi:
    case AArch64::LDURQi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
    // Paired instructions.
    case AArch64::LDPSi:
    case AArch64::LDPSWi:
    case AArch64::LDPDi:
    case AArch64::LDPQi:
    case AArch64::LDPWi:
    case AArch64::LDPXi:
    case AArch64::STPSi:
    case AArch64::STPDi:
    case AArch64::STPQi:
    case AArch64::STPWi:
    case AArch64::STPXi: {
      // Make sure this is a reg+imm (as opposed to an address reloc).
      if (!getLdStOffsetOp(MI).isImm()) {
        ++MBBI;
        break;
      }
      // Look forward to try to form a post-index instruction. For example,
      // ldr x0, [x20]
      // add x20, x20, #32
      //   merged into:
      // ldr x0, [x20], #32
      MachineBasicBlock::iterator Update =
          findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
        Modified = true;
        ++NumPostFolded;
        break;
      }
      // Don't know how to handle unscaled pre/post-index versions below, so
      // move to the next instruction.
      if (TII->isUnscaledLdSt(Opc)) {
        ++MBBI;
        break;
      }

      // Look back to try to find a pre-index instruction. For example,
      // add x0, x0, #8
      // ldr x1, [x0]
      //   merged into:
      // ldr x1, [x0, #8]!
      Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
        Modified = true;
        ++NumPreFolded;
        break;
      }
      // The immediate in the load/store is scaled by the size of the memory
      // operation. The immediate in the add we're looking for, however, is
      // not, so adjust here.
      int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);
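      // E.g., for an LDRXui an encoded immediate of 8 addresses [base, #64]
      // (8 * 8 bytes), so the update we look for is "add x0, x0, #64".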

      // Look forward for an update instruction we can fold in as a pre-index
      // operation. For example,
      // ldr x1, [x0, #64]
      // add x0, x0, #64
      //   merged into:
      // ldr x1, [x0, #64]!
      Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
        Modified = true;
        ++NumPreFolded;
        break;
      }

      // Nothing found. Just move to the next instruction.
      ++MBBI;
      break;
    }
    }
  }

  return Modified;
}

bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(*Fn.getFunction()))
    return false;

  Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
  TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
  TRI = Subtarget->getRegisterInfo();

  // Resize the modified and used register bitfield trackers. We do this once
  // per function and then clear the bitfield each time we optimize a load or
  // store.
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());

  bool Modified = false;
  bool EnableNarrowZeroStOpt = !Subtarget->requiresStrictAlign();
  for (auto &MBB : Fn)
    Modified |= optimizeBlock(MBB, EnableNarrowZeroStOpt);

  return Modified;
}

// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep loads
// and stores near one another? Note: The pre-RA instruction scheduler already
// has hooks to try to schedule pairable loads/stores together to improve
// pairing opportunities. Thus, a pre-RA pairing pass may not be worth the
// effort.

// FIXME: When pairing store instructions it's very possible for this pass to
// hoist a store with a KILL marker above another use (without a KILL marker).
// The resulting IR is invalid, but nothing uses the KILL markers after this
// pass, so it's never caused a problem in practice.

/// createAArch64LoadStoreOptimizationPass - returns an instance of the
/// load / store optimization pass.
FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
  return new AArch64LoadStoreOpt();
}