1 //===- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass that performs load / store related peephole
10 // optimizations. This pass should be run after register allocation.
11 //
12 // The pass runs after the PrologEpilogInserter where we emit the CFI
13 // instructions. In order to preserve the correctness of the unwind information,
14 // the pass should not change the order of any two instructions, one of which
15 // has the FrameSetup/FrameDestroy flag, or, alternatively, it should apply an
16 // ad-hoc fix to the unwind information.
17 //
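// Typical transformations performed by this pass include, for example,
// combining two adjacent loads
//   ldr w0, [x2]      ldr w1, [x2, #4]      ==>   ldp w0, w1, [x2]
// and folding a base-register update into a post-indexed access
//   ldr x0, [x1]      add x1, x1, #8        ==>   ldr x0, [x1], #8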
18 //===----------------------------------------------------------------------===//
19 
20 #include "AArch64InstrInfo.h"
21 #include "AArch64MachineFunctionInfo.h"
22 #include "AArch64Subtarget.h"
23 #include "MCTargetDesc/AArch64AddressingModes.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/ADT/iterator_range.h"
28 #include "llvm/Analysis/AliasAnalysis.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineFunction.h"
31 #include "llvm/CodeGen/MachineFunctionPass.h"
32 #include "llvm/CodeGen/MachineInstr.h"
33 #include "llvm/CodeGen/MachineInstrBuilder.h"
34 #include "llvm/CodeGen/MachineOperand.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/TargetRegisterInfo.h"
37 #include "llvm/IR/DebugLoc.h"
38 #include "llvm/MC/MCAsmInfo.h"
39 #include "llvm/MC/MCDwarf.h"
40 #include "llvm/Pass.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/DebugCounter.h"
44 #include "llvm/Support/ErrorHandling.h"
45 #include "llvm/Support/raw_ostream.h"
46 #include <cassert>
47 #include <cstdint>
48 #include <functional>
49 #include <iterator>
50 #include <limits>
51 #include <optional>
52 
53 using namespace llvm;
54 
55 #define DEBUG_TYPE "aarch64-ldst-opt"
56 
57 STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
58 STATISTIC(NumPostFolded, "Number of post-index updates folded");
59 STATISTIC(NumPreFolded, "Number of pre-index updates folded");
60 STATISTIC(NumUnscaledPairCreated,
61           "Number of load/store pairs generated from unscaled instructions");
62 STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
63 STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");
64 STATISTIC(NumFailedAlignmentCheck, "Number of load/store pair transformations "
65                                    "that failed the alignment check");
66 STATISTIC(NumConstOffsetFolded,
67           "Number of constant offsets of indexed addresses folded");
68 
69 DEBUG_COUNTER(RegRenamingCounter, DEBUG_TYPE "-reg-renaming",
70               "Controls which pairs are considered for renaming");
71 
72 // The LdStLimit limits how far we search for load/store pairs.
73 static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
74                                    cl::init(20), cl::Hidden);
75 
76 // The UpdateLimit limits how far we search for update instructions when we form
77 // pre-/post-index instructions.
78 static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
79                                      cl::Hidden);
80 
81 // The LdStConstLimit limits how far we search for const offset instructions
82 // when we form index address load/store instructions.
83 static cl::opt<unsigned> LdStConstLimit("aarch64-load-store-const-scan-limit",
84                                         cl::init(10), cl::Hidden);
85 
86 // Enable register renaming to find additional store pairing opportunities.
87 static cl::opt<bool> EnableRenaming("aarch64-load-store-renaming",
88                                     cl::init(true), cl::Hidden);
89 
90 #define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"
91 
92 namespace {
93 
94 using LdStPairFlags = struct LdStPairFlags {
95   // If a matching instruction is found, MergeForward is set to true if the
96   // merge is to remove the first instruction and replace the second with
97   // a pair-wise insn, and false if the reverse is true.
98   bool MergeForward = false;
99 
100   // SExtIdx gives the index of the result of the load pair that must be
101   // extended. The value of SExtIdx assumes that the paired load produces the
102   // value in this order: (I, returned iterator), i.e., -1 means no value has
103   // to be extended, 0 means I, and 1 means the returned iterator.
104   int SExtIdx = -1;
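  // For example, when an LDRSWui is paired with an LDRWui, the pair is
  // emitted as an LDPWi and SExtIdx records which of the two results must
  // afterwards be sign-extended back to the full 64-bit register.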
105 
106   // If not none, RenameReg can be used to rename the result register of the
107   // first store in a pair. Currently this only works when merging stores
108   // forward.
109   std::optional<MCPhysReg> RenameReg;
110 
111   LdStPairFlags() = default;
112 
113   void setMergeForward(bool V = true) { MergeForward = V; }
114   bool getMergeForward() const { return MergeForward; }
115 
116   void setSExtIdx(int V) { SExtIdx = V; }
117   int getSExtIdx() const { return SExtIdx; }
118 
119   void setRenameReg(MCPhysReg R) { RenameReg = R; }
120   void clearRenameReg() { RenameReg = std::nullopt; }
121   std::optional<MCPhysReg> getRenameReg() const { return RenameReg; }
122 };
123 
124 struct AArch64LoadStoreOpt : public MachineFunctionPass {
125   static char ID;
126 
127   AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
128     initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
129   }
130 
131   AliasAnalysis *AA;
132   const AArch64InstrInfo *TII;
133   const TargetRegisterInfo *TRI;
134   const AArch64Subtarget *Subtarget;
135 
136   // Track which register units have been modified and used.
137   LiveRegUnits ModifiedRegUnits, UsedRegUnits;
138   LiveRegUnits DefinedInBB;
139 
140   void getAnalysisUsage(AnalysisUsage &AU) const override {
141     AU.addRequired<AAResultsWrapperPass>();
142     MachineFunctionPass::getAnalysisUsage(AU);
143   }
144 
145   // Scan the instructions looking for a load/store that can be combined
146   // with the current instruction into a load/store pair.
147   // Return the matching instruction if one is found, else MBB->end().
148   MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
149                                                LdStPairFlags &Flags,
150                                                unsigned Limit,
151                                                bool FindNarrowMerge);
152 
153   // Scan the instructions looking for a store that writes to the address from
154   // which the current load instruction reads. Return true if one is found.
155   bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
156                          MachineBasicBlock::iterator &StoreI);
157 
158   // Merge the two narrow stores indicated into a single wider store instruction.
159   MachineBasicBlock::iterator
160   mergeNarrowZeroStores(MachineBasicBlock::iterator I,
161                         MachineBasicBlock::iterator MergeMI,
162                         const LdStPairFlags &Flags);
163 
164   // Merge the two instructions indicated into a single pair-wise instruction.
165   MachineBasicBlock::iterator
166   mergePairedInsns(MachineBasicBlock::iterator I,
167                    MachineBasicBlock::iterator Paired,
168                    const LdStPairFlags &Flags);
169 
170   // Promote the load that reads directly from the address stored to.
171   MachineBasicBlock::iterator
172   promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
173                        MachineBasicBlock::iterator StoreI);
174 
175   // Scan the instruction list to find a base register update that can
176   // be combined with the current instruction (a load or store) using
177   // pre or post indexed addressing with writeback. Scan forwards.
178   MachineBasicBlock::iterator
179   findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
180                                 int UnscaledOffset, unsigned Limit);
181 
182   // Scan the instruction list to find a register assigned with a constant
183   // value that can be combined with the current instruction (a load or store)
184   // using base plus unsigned-immediate addressing. Scan backwards.
185   MachineBasicBlock::iterator
186   findMatchingConstOffsetBackward(MachineBasicBlock::iterator I, unsigned Limit,
187                                   unsigned &Offset);
188 
189   // Scan the instruction list to find a base register update that can
190   // be combined with the current instruction (a load or store) using
191   // pre or post indexed addressing with writeback. Scan backwards.
192   // `MergeEither` is set to true if the combined instruction may be placed
193   // either at the location of the load/store instruction or at the location of
194   // the update instruction.
195   MachineBasicBlock::iterator
196   findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit,
197                                  bool &MergeEither);
198 
199   // Find an instruction that updates the base register of the ld/st
200   // instruction.
201   bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI,
202                             unsigned BaseReg, int Offset);
203 
204   bool isMatchingMovConstInsn(MachineInstr &MemMI, MachineInstr &MI,
205                               unsigned IndexReg, unsigned &Offset);
206 
207   // Merge a pre- or post-index base register update into a ld/st instruction.
208   std::optional<MachineBasicBlock::iterator>
209   mergeUpdateInsn(MachineBasicBlock::iterator I,
210                   MachineBasicBlock::iterator Update, bool IsForward,
211                   bool IsPreIdx, bool MergeEither);
212 
213   MachineBasicBlock::iterator
214   mergeConstOffsetInsn(MachineBasicBlock::iterator I,
215                        MachineBasicBlock::iterator Update, unsigned Offset,
216                        int Scale);
217 
218   // Find and merge zero store instructions.
219   bool tryToMergeZeroStInst(MachineBasicBlock::iterator &MBBI);
220 
221   // Find and pair ldr/str instructions.
222   bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI);
223 
224   // Find and promote load instructions which read directly from a store.
225   bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);
226 
227   // Find and merge base register updates before or after a ld/st instruction.
228   bool tryToMergeLdStUpdate(MachineBasicBlock::iterator &MBBI);
229 
230   // Find and merge an indexed ld/st instruction into a base ld/st instruction.
231   bool tryToMergeIndexLdSt(MachineBasicBlock::iterator &MBBI, int Scale);
232 
233   bool optimizeBlock(MachineBasicBlock &MBB, bool EnableNarrowZeroStOpt);
234 
235   bool runOnMachineFunction(MachineFunction &Fn) override;
236 
237   MachineFunctionProperties getRequiredProperties() const override {
238     return MachineFunctionProperties().set(
239         MachineFunctionProperties::Property::NoVRegs);
240   }
241 
242   StringRef getPassName() const override { return AARCH64_LOAD_STORE_OPT_NAME; }
243 };
244 
245 char AArch64LoadStoreOpt::ID = 0;
246 
247 } // end anonymous namespace
248 
249 INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
250                 AARCH64_LOAD_STORE_OPT_NAME, false, false)
251 
252 static bool isNarrowStore(unsigned Opc) {
253   switch (Opc) {
254   default:
255     return false;
256   case AArch64::STRBBui:
257   case AArch64::STURBBi:
258   case AArch64::STRHHui:
259   case AArch64::STURHHi:
260     return true;
261   }
262 }
263 
264 // These instructions set the memory tag and either keep the memory contents
265 // unchanged or set them to zero, ignoring the address part of the source register.
266 static bool isTagStore(const MachineInstr &MI) {
267   switch (MI.getOpcode()) {
268   default:
269     return false;
270   case AArch64::STGi:
271   case AArch64::STZGi:
272   case AArch64::ST2Gi:
273   case AArch64::STZ2Gi:
274     return true;
275   }
276 }
277 
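// Map a sign-extending load opcode (LDRSW*) to its non-extending W-register
// form so it can be paired with an ordinary 32-bit load; opcodes already in
// non-extending form are returned unchanged, and *IsValidLdStrOpc is cleared
// for opcodes this pass does not know how to pair.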
278 static unsigned getMatchingNonSExtOpcode(unsigned Opc,
279                                          bool *IsValidLdStrOpc = nullptr) {
280   if (IsValidLdStrOpc)
281     *IsValidLdStrOpc = true;
282   switch (Opc) {
283   default:
284     if (IsValidLdStrOpc)
285       *IsValidLdStrOpc = false;
286     return std::numeric_limits<unsigned>::max();
287   case AArch64::STRDui:
288   case AArch64::STURDi:
289   case AArch64::STRDpre:
290   case AArch64::STRQui:
291   case AArch64::STURQi:
292   case AArch64::STRQpre:
293   case AArch64::STRBBui:
294   case AArch64::STURBBi:
295   case AArch64::STRHHui:
296   case AArch64::STURHHi:
297   case AArch64::STRWui:
298   case AArch64::STRWpre:
299   case AArch64::STURWi:
300   case AArch64::STRXui:
301   case AArch64::STRXpre:
302   case AArch64::STURXi:
303   case AArch64::LDRDui:
304   case AArch64::LDURDi:
305   case AArch64::LDRDpre:
306   case AArch64::LDRQui:
307   case AArch64::LDURQi:
308   case AArch64::LDRQpre:
309   case AArch64::LDRWui:
310   case AArch64::LDURWi:
311   case AArch64::LDRWpre:
312   case AArch64::LDRXui:
313   case AArch64::LDURXi:
314   case AArch64::LDRXpre:
315   case AArch64::STRSui:
316   case AArch64::STURSi:
317   case AArch64::STRSpre:
318   case AArch64::LDRSui:
319   case AArch64::LDURSi:
320   case AArch64::LDRSpre:
321     return Opc;
322   case AArch64::LDRSWui:
323     return AArch64::LDRWui;
324   case AArch64::LDURSWi:
325     return AArch64::LDURWi;
326   case AArch64::LDRSWpre:
327     return AArch64::LDRWpre;
328   }
329 }
330 
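// Map a narrow store opcode to the store of twice the width (byte -> half,
// half -> word, word -> doubleword); used when merging two adjacent zero
// stores into a single wider zero store.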
331 static unsigned getMatchingWideOpcode(unsigned Opc) {
332   switch (Opc) {
333   default:
334     llvm_unreachable("Opcode has no wide equivalent!");
335   case AArch64::STRBBui:
336     return AArch64::STRHHui;
337   case AArch64::STRHHui:
338     return AArch64::STRWui;
339   case AArch64::STURBBi:
340     return AArch64::STURHHi;
341   case AArch64::STURHHi:
342     return AArch64::STURWi;
343   case AArch64::STURWi:
344     return AArch64::STURXi;
345   case AArch64::STRWui:
346     return AArch64::STRXui;
347   }
348 }
349 
350 static unsigned getMatchingPairOpcode(unsigned Opc) {
351   switch (Opc) {
352   default:
353     llvm_unreachable("Opcode has no pairwise equivalent!");
354   case AArch64::STRSui:
355   case AArch64::STURSi:
356     return AArch64::STPSi;
357   case AArch64::STRSpre:
358     return AArch64::STPSpre;
359   case AArch64::STRDui:
360   case AArch64::STURDi:
361     return AArch64::STPDi;
362   case AArch64::STRDpre:
363     return AArch64::STPDpre;
364   case AArch64::STRQui:
365   case AArch64::STURQi:
366     return AArch64::STPQi;
367   case AArch64::STRQpre:
368     return AArch64::STPQpre;
369   case AArch64::STRWui:
370   case AArch64::STURWi:
371     return AArch64::STPWi;
372   case AArch64::STRWpre:
373     return AArch64::STPWpre;
374   case AArch64::STRXui:
375   case AArch64::STURXi:
376     return AArch64::STPXi;
377   case AArch64::STRXpre:
378     return AArch64::STPXpre;
379   case AArch64::LDRSui:
380   case AArch64::LDURSi:
381     return AArch64::LDPSi;
382   case AArch64::LDRSpre:
383     return AArch64::LDPSpre;
384   case AArch64::LDRDui:
385   case AArch64::LDURDi:
386     return AArch64::LDPDi;
387   case AArch64::LDRDpre:
388     return AArch64::LDPDpre;
389   case AArch64::LDRQui:
390   case AArch64::LDURQi:
391     return AArch64::LDPQi;
392   case AArch64::LDRQpre:
393     return AArch64::LDPQpre;
394   case AArch64::LDRWui:
395   case AArch64::LDURWi:
396     return AArch64::LDPWi;
397   case AArch64::LDRWpre:
398     return AArch64::LDPWpre;
399   case AArch64::LDRXui:
400   case AArch64::LDURXi:
401     return AArch64::LDPXi;
402   case AArch64::LDRXpre:
403     return AArch64::LDPXpre;
404   case AArch64::LDRSWui:
405   case AArch64::LDURSWi:
406     return AArch64::LDPSWi;
407   case AArch64::LDRSWpre:
408     return AArch64::LDPSWpre;
409   }
410 }
411 
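// Return true if StoreInst is a store of the same addressing form (scaled or
// unscaled) whose width is at least that of LoadInst, so the stored bytes can
// potentially be forwarded to the load.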
412 static unsigned isMatchingStore(MachineInstr &LoadInst,
413                                 MachineInstr &StoreInst) {
414   unsigned LdOpc = LoadInst.getOpcode();
415   unsigned StOpc = StoreInst.getOpcode();
416   switch (LdOpc) {
417   default:
418     llvm_unreachable("Unsupported load instruction!");
419   case AArch64::LDRBBui:
420     return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
421            StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
422   case AArch64::LDURBBi:
423     return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
424            StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
425   case AArch64::LDRHHui:
426     return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
427            StOpc == AArch64::STRXui;
428   case AArch64::LDURHHi:
429     return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
430            StOpc == AArch64::STURXi;
431   case AArch64::LDRWui:
432     return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
433   case AArch64::LDURWi:
434     return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
435   case AArch64::LDRXui:
436     return StOpc == AArch64::STRXui;
437   case AArch64::LDURXi:
438     return StOpc == AArch64::STURXi;
439   }
440 }
441 
442 static unsigned getPreIndexedOpcode(unsigned Opc) {
443   // FIXME: We don't currently support creating pre-indexed loads/stores when
444   // the load or store is the unscaled version.  If we decide to perform such an
445   // optimization in the future the cases for the unscaled loads/stores will
446   // need to be added here.
447   switch (Opc) {
448   default:
449     llvm_unreachable("Opcode has no pre-indexed equivalent!");
450   case AArch64::STRSui:
451     return AArch64::STRSpre;
452   case AArch64::STRDui:
453     return AArch64::STRDpre;
454   case AArch64::STRQui:
455     return AArch64::STRQpre;
456   case AArch64::STRBBui:
457     return AArch64::STRBBpre;
458   case AArch64::STRHHui:
459     return AArch64::STRHHpre;
460   case AArch64::STRWui:
461     return AArch64::STRWpre;
462   case AArch64::STRXui:
463     return AArch64::STRXpre;
464   case AArch64::LDRSui:
465     return AArch64::LDRSpre;
466   case AArch64::LDRDui:
467     return AArch64::LDRDpre;
468   case AArch64::LDRQui:
469     return AArch64::LDRQpre;
470   case AArch64::LDRBBui:
471     return AArch64::LDRBBpre;
472   case AArch64::LDRHHui:
473     return AArch64::LDRHHpre;
474   case AArch64::LDRWui:
475     return AArch64::LDRWpre;
476   case AArch64::LDRXui:
477     return AArch64::LDRXpre;
478   case AArch64::LDRSWui:
479     return AArch64::LDRSWpre;
480   case AArch64::LDPSi:
481     return AArch64::LDPSpre;
482   case AArch64::LDPSWi:
483     return AArch64::LDPSWpre;
484   case AArch64::LDPDi:
485     return AArch64::LDPDpre;
486   case AArch64::LDPQi:
487     return AArch64::LDPQpre;
488   case AArch64::LDPWi:
489     return AArch64::LDPWpre;
490   case AArch64::LDPXi:
491     return AArch64::LDPXpre;
492   case AArch64::STPSi:
493     return AArch64::STPSpre;
494   case AArch64::STPDi:
495     return AArch64::STPDpre;
496   case AArch64::STPQi:
497     return AArch64::STPQpre;
498   case AArch64::STPWi:
499     return AArch64::STPWpre;
500   case AArch64::STPXi:
501     return AArch64::STPXpre;
502   case AArch64::STGi:
503     return AArch64::STGPreIndex;
504   case AArch64::STZGi:
505     return AArch64::STZGPreIndex;
506   case AArch64::ST2Gi:
507     return AArch64::ST2GPreIndex;
508   case AArch64::STZ2Gi:
509     return AArch64::STZ2GPreIndex;
510   case AArch64::STGPi:
511     return AArch64::STGPpre;
512   }
513 }
514 
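// Map a register-offset (reg + reg, LSL #imm) load opcode to its unsigned
// immediate-offset (base + imm) form, used when the index register is known
// to hold a constant.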
515 static unsigned getBaseAddressOpcode(unsigned Opc) {
516   // TODO: Add more index address stores.
517   switch (Opc) {
518   default:
519     llvm_unreachable("Opcode has no base address equivalent!");
520   case AArch64::LDRBroX:
521     return AArch64::LDRBui;
522   case AArch64::LDRBBroX:
523     return AArch64::LDRBBui;
524   case AArch64::LDRSBXroX:
525     return AArch64::LDRSBXui;
526   case AArch64::LDRSBWroX:
527     return AArch64::LDRSBWui;
528   case AArch64::LDRHroX:
529     return AArch64::LDRHui;
530   case AArch64::LDRHHroX:
531     return AArch64::LDRHHui;
532   case AArch64::LDRSHXroX:
533     return AArch64::LDRSHXui;
534   case AArch64::LDRSHWroX:
535     return AArch64::LDRSHWui;
536   case AArch64::LDRWroX:
537     return AArch64::LDRWui;
538   case AArch64::LDRSroX:
539     return AArch64::LDRSui;
540   case AArch64::LDRSWroX:
541     return AArch64::LDRSWui;
542   case AArch64::LDRDroX:
543     return AArch64::LDRDui;
544   case AArch64::LDRXroX:
545     return AArch64::LDRXui;
546   case AArch64::LDRQroX:
547     return AArch64::LDRQui;
548   }
549 }
550 
551 static unsigned getPostIndexedOpcode(unsigned Opc) {
552   switch (Opc) {
553   default:
554     llvm_unreachable("Opcode has no post-indexed equivalent!");
555   case AArch64::STRSui:
556   case AArch64::STURSi:
557     return AArch64::STRSpost;
558   case AArch64::STRDui:
559   case AArch64::STURDi:
560     return AArch64::STRDpost;
561   case AArch64::STRQui:
562   case AArch64::STURQi:
563     return AArch64::STRQpost;
564   case AArch64::STRBBui:
565     return AArch64::STRBBpost;
566   case AArch64::STRHHui:
567     return AArch64::STRHHpost;
568   case AArch64::STRWui:
569   case AArch64::STURWi:
570     return AArch64::STRWpost;
571   case AArch64::STRXui:
572   case AArch64::STURXi:
573     return AArch64::STRXpost;
574   case AArch64::LDRSui:
575   case AArch64::LDURSi:
576     return AArch64::LDRSpost;
577   case AArch64::LDRDui:
578   case AArch64::LDURDi:
579     return AArch64::LDRDpost;
580   case AArch64::LDRQui:
581   case AArch64::LDURQi:
582     return AArch64::LDRQpost;
583   case AArch64::LDRBBui:
584     return AArch64::LDRBBpost;
585   case AArch64::LDRHHui:
586     return AArch64::LDRHHpost;
587   case AArch64::LDRWui:
588   case AArch64::LDURWi:
589     return AArch64::LDRWpost;
590   case AArch64::LDRXui:
591   case AArch64::LDURXi:
592     return AArch64::LDRXpost;
593   case AArch64::LDRSWui:
594     return AArch64::LDRSWpost;
595   case AArch64::LDPSi:
596     return AArch64::LDPSpost;
597   case AArch64::LDPSWi:
598     return AArch64::LDPSWpost;
599   case AArch64::LDPDi:
600     return AArch64::LDPDpost;
601   case AArch64::LDPQi:
602     return AArch64::LDPQpost;
603   case AArch64::LDPWi:
604     return AArch64::LDPWpost;
605   case AArch64::LDPXi:
606     return AArch64::LDPXpost;
607   case AArch64::STPSi:
608     return AArch64::STPSpost;
609   case AArch64::STPDi:
610     return AArch64::STPDpost;
611   case AArch64::STPQi:
612     return AArch64::STPQpost;
613   case AArch64::STPWi:
614     return AArch64::STPWpost;
615   case AArch64::STPXi:
616     return AArch64::STPXpost;
617   case AArch64::STGi:
618     return AArch64::STGPostIndex;
619   case AArch64::STZGi:
620     return AArch64::STZGPostIndex;
621   case AArch64::ST2Gi:
622     return AArch64::ST2GPostIndex;
623   case AArch64::STZ2Gi:
624     return AArch64::STZ2GPostIndex;
625   case AArch64::STGPi:
626     return AArch64::STGPpost;
627   }
628 }
629 
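// Return true if FirstMI is a pre-indexed load/store and MI is the matching
// unsigned-offset or unscaled load/store of the same width, i.e. the two can
// be combined into a single pre-indexed paired instruction.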
630 static bool isPreLdStPairCandidate(MachineInstr &FirstMI, MachineInstr &MI) {
631 
632   unsigned OpcA = FirstMI.getOpcode();
633   unsigned OpcB = MI.getOpcode();
634 
635   switch (OpcA) {
636   default:
637     return false;
638   case AArch64::STRSpre:
639     return (OpcB == AArch64::STRSui) || (OpcB == AArch64::STURSi);
640   case AArch64::STRDpre:
641     return (OpcB == AArch64::STRDui) || (OpcB == AArch64::STURDi);
642   case AArch64::STRQpre:
643     return (OpcB == AArch64::STRQui) || (OpcB == AArch64::STURQi);
644   case AArch64::STRWpre:
645     return (OpcB == AArch64::STRWui) || (OpcB == AArch64::STURWi);
646   case AArch64::STRXpre:
647     return (OpcB == AArch64::STRXui) || (OpcB == AArch64::STURXi);
648   case AArch64::LDRSpre:
649     return (OpcB == AArch64::LDRSui) || (OpcB == AArch64::LDURSi);
650   case AArch64::LDRDpre:
651     return (OpcB == AArch64::LDRDui) || (OpcB == AArch64::LDURDi);
652   case AArch64::LDRQpre:
653     return (OpcB == AArch64::LDRQui) || (OpcB == AArch64::LDURQi);
654   case AArch64::LDRWpre:
655     return (OpcB == AArch64::LDRWui) || (OpcB == AArch64::LDURWi);
656   case AArch64::LDRXpre:
657     return (OpcB == AArch64::LDRXui) || (OpcB == AArch64::LDURXi);
658   case AArch64::LDRSWpre:
659     return (OpcB == AArch64::LDRSWui) || (OpcB == AArch64::LDURSWi);
660   }
661 }
662 
663 // Returns the scale and offset range of pre/post indexed variants of MI.
664 static void getPrePostIndexedMemOpInfo(const MachineInstr &MI, int &Scale,
665                                        int &MinOffset, int &MaxOffset) {
666   bool IsPaired = AArch64InstrInfo::isPairedLdSt(MI);
667   bool IsTagStore = isTagStore(MI);
668   // ST*G and all paired ldst have the same scale in pre/post-indexed variants
669   // as in the "unsigned offset" variant.
670   // All other pre/post indexed ldst instructions are unscaled.
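  // For example, STPXi has scale 8, so its writeback immediate covers byte
  // offsets [-512, 504], while a plain LDRXui becoming pre/post-indexed takes
  // a signed 9-bit byte offset in [-256, 255].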
671   Scale = (IsTagStore || IsPaired) ? AArch64InstrInfo::getMemScale(MI) : 1;
672 
673   if (IsPaired) {
674     MinOffset = -64;
675     MaxOffset = 63;
676   } else {
677     MinOffset = -256;
678     MaxOffset = 255;
679   }
680 }
681 
682 static MachineOperand &getLdStRegOp(MachineInstr &MI,
683                                     unsigned PairedRegOp = 0) {
684   assert(PairedRegOp < 2 && "Unexpected register operand idx.");
685   bool IsPreLdSt = AArch64InstrInfo::isPreLdSt(MI);
686   if (IsPreLdSt)
687     PairedRegOp += 1;
688   unsigned Idx =
689       AArch64InstrInfo::isPairedLdSt(MI) || IsPreLdSt ? PairedRegOp : 0;
690   return MI.getOperand(Idx);
691 }
692 
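// Return true if the bytes read by LoadInst lie entirely within the bytes
// written by StoreInst. For example, an STRXui at byte offset 8 covers bytes
// [8, 16), so an LDRWui reading bytes [12, 16) is in range.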
693 static bool isLdOffsetInRangeOfSt(MachineInstr &LoadInst,
694                                   MachineInstr &StoreInst,
695                                   const AArch64InstrInfo *TII) {
696   assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
697   int LoadSize = TII->getMemScale(LoadInst);
698   int StoreSize = TII->getMemScale(StoreInst);
699   int UnscaledStOffset =
700       TII->hasUnscaledLdStOffset(StoreInst)
701           ? AArch64InstrInfo::getLdStOffsetOp(StoreInst).getImm()
702           : AArch64InstrInfo::getLdStOffsetOp(StoreInst).getImm() * StoreSize;
703   int UnscaledLdOffset =
704       TII->hasUnscaledLdStOffset(LoadInst)
705           ? AArch64InstrInfo::getLdStOffsetOp(LoadInst).getImm()
706           : AArch64InstrInfo::getLdStOffsetOp(LoadInst).getImm() * LoadSize;
707   return (UnscaledStOffset <= UnscaledLdOffset) &&
708          (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
709 }
710 
711 static bool isPromotableZeroStoreInst(MachineInstr &MI) {
712   unsigned Opc = MI.getOpcode();
713   return (Opc == AArch64::STRWui || Opc == AArch64::STURWi ||
714           isNarrowStore(Opc)) &&
715          getLdStRegOp(MI).getReg() == AArch64::WZR;
716 }
717 
718 static bool isPromotableLoadFromStore(MachineInstr &MI) {
719   switch (MI.getOpcode()) {
720   default:
721     return false;
722   // Scaled instructions.
723   case AArch64::LDRBBui:
724   case AArch64::LDRHHui:
725   case AArch64::LDRWui:
726   case AArch64::LDRXui:
727   // Unscaled instructions.
728   case AArch64::LDURBBi:
729   case AArch64::LDURHHi:
730   case AArch64::LDURWi:
731   case AArch64::LDURXi:
732     return true;
733   }
734 }
735 
736 static bool isMergeableLdStUpdate(MachineInstr &MI, AArch64FunctionInfo &AFI) {
737   unsigned Opc = MI.getOpcode();
738   switch (Opc) {
739   default:
740     return false;
741   // Scaled instructions.
742   case AArch64::STRSui:
743   case AArch64::STRDui:
744   case AArch64::STRQui:
745   case AArch64::STRXui:
746   case AArch64::STRWui:
747   case AArch64::STRHHui:
748   case AArch64::STRBBui:
749   case AArch64::LDRSui:
750   case AArch64::LDRDui:
751   case AArch64::LDRQui:
752   case AArch64::LDRXui:
753   case AArch64::LDRWui:
754   case AArch64::LDRHHui:
755   case AArch64::LDRBBui:
756   case AArch64::STGi:
757   case AArch64::STZGi:
758   case AArch64::ST2Gi:
759   case AArch64::STZ2Gi:
760   case AArch64::STGPi:
761   // Unscaled instructions.
762   case AArch64::STURSi:
763   case AArch64::STURDi:
764   case AArch64::STURQi:
765   case AArch64::STURWi:
766   case AArch64::STURXi:
767   case AArch64::LDURSi:
768   case AArch64::LDURDi:
769   case AArch64::LDURQi:
770   case AArch64::LDURWi:
771   case AArch64::LDURXi:
772   // Paired instructions.
773   case AArch64::LDPSi:
774   case AArch64::LDPSWi:
775   case AArch64::LDPDi:
776   case AArch64::LDPQi:
777   case AArch64::LDPWi:
778   case AArch64::LDPXi:
779   case AArch64::STPSi:
780   case AArch64::STPDi:
781   case AArch64::STPQi:
782   case AArch64::STPWi:
783   case AArch64::STPXi:
784     // Make sure this is a reg+imm (as opposed to an address reloc).
785     if (!AArch64InstrInfo::getLdStOffsetOp(MI).isImm())
786       return false;
787 
788     // When using stack tagging, simple sp+imm loads and stores are not
789     // tag-checked, but pre- and post-indexed versions of them are, so we can't
790     // replace the former with the latter. This transformation would be valid
791     // if the load/store accesses an untagged stack slot, but we don't have
792     // that information available after frame indices have been eliminated.
793     if (AFI.isMTETagged() &&
794         AArch64InstrInfo::getLdStBaseOp(MI).getReg() == AArch64::SP)
795       return false;
796 
797     return true;
798   }
799 }
800 
801 // Make sure this is a reg+reg Ld/St.
802 static bool isMergeableIndexLdSt(MachineInstr &MI, int &Scale) {
803   unsigned Opc = MI.getOpcode();
804   switch (Opc) {
805   default:
806     return false;
807   // Scaled instructions.
808   // TODO: Add more index address stores.
809   case AArch64::LDRBroX:
810   case AArch64::LDRBBroX:
811   case AArch64::LDRSBXroX:
812   case AArch64::LDRSBWroX:
813     Scale = 1;
814     return true;
815   case AArch64::LDRHroX:
816   case AArch64::LDRHHroX:
817   case AArch64::LDRSHXroX:
818   case AArch64::LDRSHWroX:
819     Scale = 2;
820     return true;
821   case AArch64::LDRWroX:
822   case AArch64::LDRSroX:
823   case AArch64::LDRSWroX:
824     Scale = 4;
825     return true;
826   case AArch64::LDRDroX:
827   case AArch64::LDRXroX:
828     Scale = 8;
829     return true;
830   case AArch64::LDRQroX:
831     Scale = 16;
832     return true;
833   }
834 }
835 
836 static bool isRewritableImplicitDef(unsigned Opc) {
837   switch (Opc) {
838   default:
839     return false;
840   case AArch64::ORRWrs:
841   case AArch64::ADDWri:
842     return true;
843   }
844 }
845 
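// For example, two adjacent zero stores
//   strh wzr, [x0]      strh wzr, [x0, #2]    ==>    str wzr, [x0]
// are merged into a single store of WZR (or XZR for the 64-bit case).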
846 MachineBasicBlock::iterator
847 AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
848                                            MachineBasicBlock::iterator MergeMI,
849                                            const LdStPairFlags &Flags) {
850   assert(isPromotableZeroStoreInst(*I) && isPromotableZeroStoreInst(*MergeMI) &&
851          "Expected promotable zero stores.");
852 
853   MachineBasicBlock::iterator E = I->getParent()->end();
854   MachineBasicBlock::iterator NextI = next_nodbg(I, E);
855   // If NextI is the second of the two instructions to be merged, we need
856   // to skip one further. Either way, the merge will invalidate the iterator,
857   // and we don't need to scan the new instruction, as it's a merged wider
858   // store, which we're not considering for further action anyway.
859   if (NextI == MergeMI)
860     NextI = next_nodbg(NextI, E);
861 
862   unsigned Opc = I->getOpcode();
863   unsigned MergeMIOpc = MergeMI->getOpcode();
864   bool IsScaled = !TII->hasUnscaledLdStOffset(Opc);
865   bool IsMergedMIScaled = !TII->hasUnscaledLdStOffset(MergeMIOpc);
866   int OffsetStride = IsScaled ? TII->getMemScale(*I) : 1;
867   int MergeMIOffsetStride = IsMergedMIScaled ? TII->getMemScale(*MergeMI) : 1;
868 
869   bool MergeForward = Flags.getMergeForward();
870   // Insert our new paired instruction after whichever of the paired
871   // instructions MergeForward indicates.
872   MachineBasicBlock::iterator InsertionPoint = MergeForward ? MergeMI : I;
873   // MergeForward also determines from which instruction we copy the base
874   // register operand, so that its flags stay compatible with the input code.
875   const MachineOperand &BaseRegOp =
876       MergeForward ? AArch64InstrInfo::getLdStBaseOp(*MergeMI)
877                    : AArch64InstrInfo::getLdStBaseOp(*I);
878 
879   // Compute both byte offsets; the wider store uses the lower of the two.
880   int64_t IOffsetInBytes =
881       AArch64InstrInfo::getLdStOffsetOp(*I).getImm() * OffsetStride;
882   int64_t MIOffsetInBytes =
883       AArch64InstrInfo::getLdStOffsetOp(*MergeMI).getImm() *
884       MergeMIOffsetStride;
885   // Select final offset based on the offset order.
886   int64_t OffsetImm;
887   if (IOffsetInBytes > MIOffsetInBytes)
888     OffsetImm = MIOffsetInBytes;
889   else
890     OffsetImm = IOffsetInBytes;
891 
892   int NewOpcode = getMatchingWideOpcode(Opc);
893   bool FinalIsScaled = !TII->hasUnscaledLdStOffset(NewOpcode);
894 
895   // Adjust final offset if the result opcode is a scaled store.
896   if (FinalIsScaled) {
897     int NewOffsetStride = FinalIsScaled ? TII->getMemScale(NewOpcode) : 1;
898     assert(((OffsetImm % NewOffsetStride) == 0) &&
899            "Offset should be a multiple of the store memory scale");
900     OffsetImm = OffsetImm / NewOffsetStride;
901   }
902 
903   // Construct the new instruction.
904   DebugLoc DL = I->getDebugLoc();
905   MachineBasicBlock *MBB = I->getParent();
906   MachineInstrBuilder MIB;
907   MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
908             .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
909             .add(BaseRegOp)
910             .addImm(OffsetImm)
911             .cloneMergedMemRefs({&*I, &*MergeMI})
912             .setMIFlags(I->mergeFlagsWith(*MergeMI));
913   (void)MIB;
914 
915   LLVM_DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n    ");
916   LLVM_DEBUG(I->print(dbgs()));
917   LLVM_DEBUG(dbgs() << "    ");
918   LLVM_DEBUG(MergeMI->print(dbgs()));
919   LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
920   LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
921   LLVM_DEBUG(dbgs() << "\n");
922 
923   // Erase the old instructions.
924   I->eraseFromParent();
925   MergeMI->eraseFromParent();
926   return NextI;
927 }
928 
929 // Apply Fn to all instructions between MI and the beginning of the block, until
930 // a def for DefReg is reached. Returns true iff Fn returns true for all
931 // visited instructions. Stops after visiting Limit instructions.
932 static bool forAllMIsUntilDef(MachineInstr &MI, MCPhysReg DefReg,
933                               const TargetRegisterInfo *TRI, unsigned Limit,
934                               std::function<bool(MachineInstr &, bool)> &Fn) {
935   auto MBB = MI.getParent();
936   for (MachineInstr &I :
937        instructionsWithoutDebug(MI.getReverseIterator(), MBB->instr_rend())) {
938     if (!Limit)
939       return false;
940     --Limit;
941 
942     bool isDef = any_of(I.operands(), [DefReg, TRI](MachineOperand &MOP) {
943       return MOP.isReg() && MOP.isDef() && !MOP.isDebug() && MOP.getReg() &&
944              TRI->regsOverlap(MOP.getReg(), DefReg);
945     });
946     if (!Fn(I, isDef))
947       return false;
948     if (isDef)
949       break;
950   }
951   return true;
952 }
953 
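// Refresh Units for MI: registers whose last use (kill) is in MI are removed
// first, then every register MI defines or uses without killing is added.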
954 static void updateDefinedRegisters(MachineInstr &MI, LiveRegUnits &Units,
955                                    const TargetRegisterInfo *TRI) {
956 
957   for (const MachineOperand &MOP : phys_regs_and_masks(MI))
958     if (MOP.isReg() && MOP.isKill())
959       Units.removeReg(MOP.getReg());
960 
961   for (const MachineOperand &MOP : phys_regs_and_masks(MI))
962     if (MOP.isReg() && !MOP.isKill())
963       Units.addReg(MOP.getReg());
964 }
965 
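// For example, two adjacent loads
//   ldr w0, [x2]      ldr w1, [x2, #4]      ==>    ldp w0, w1, [x2]
// are rewritten as a single paired load; stores are handled analogously.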
966 MachineBasicBlock::iterator
967 AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
968                                       MachineBasicBlock::iterator Paired,
969                                       const LdStPairFlags &Flags) {
970   MachineBasicBlock::iterator E = I->getParent()->end();
971   MachineBasicBlock::iterator NextI = next_nodbg(I, E);
972   // If NextI is the second of the two instructions to be merged, we need
973   // to skip one further. Either way, the merge will invalidate the iterator,
974   // and we don't need to scan the new instruction, as it's a pairwise
975   // instruction, which we're not considering for further action anyway.
976   if (NextI == Paired)
977     NextI = next_nodbg(NextI, E);
978 
979   int SExtIdx = Flags.getSExtIdx();
980   unsigned Opc =
981       SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
982   bool IsUnscaled = TII->hasUnscaledLdStOffset(Opc);
983   int OffsetStride = IsUnscaled ? TII->getMemScale(*I) : 1;
984 
985   bool MergeForward = Flags.getMergeForward();
986 
987   std::optional<MCPhysReg> RenameReg = Flags.getRenameReg();
988   if (RenameReg) {
989     MCRegister RegToRename = getLdStRegOp(*I).getReg();
990     DefinedInBB.addReg(*RenameReg);
991 
992     // Return the sub/super register of RenameReg that belongs to the given
993     // register class, i.e. matches the size of the register being renamed.
994     auto GetMatchingSubReg =
995         [this, RenameReg](const TargetRegisterClass *C) -> MCPhysReg {
996       for (MCPhysReg SubOrSuper :
997            TRI->sub_and_superregs_inclusive(*RenameReg)) {
998         if (C->contains(SubOrSuper))
999           return SubOrSuper;
1000       }
1001       llvm_unreachable("Should have found matching sub or super register!");
1002     };
1003 
1004     std::function<bool(MachineInstr &, bool)> UpdateMIs =
1005         [this, RegToRename, GetMatchingSubReg, MergeForward](MachineInstr &MI,
1006                                                              bool IsDef) {
1007           if (IsDef) {
1008             bool SeenDef = false;
1009             for (unsigned OpIdx = 0; OpIdx < MI.getNumOperands(); ++OpIdx) {
1010               MachineOperand &MOP = MI.getOperand(OpIdx);
1011               // Rename the first explicit definition and all implicit
1012               // definitions matching RegToRename.
1013               if (MOP.isReg() && !MOP.isDebug() && MOP.getReg() &&
1014                   (!MergeForward || !SeenDef ||
1015                    (MOP.isDef() && MOP.isImplicit())) &&
1016                   TRI->regsOverlap(MOP.getReg(), RegToRename)) {
1017                 assert((MOP.isImplicit() ||
1018                         (MOP.isRenamable() && !MOP.isEarlyClobber())) &&
1019                        "Need renamable operands");
1020                 Register MatchingReg;
1021                 if (const TargetRegisterClass *RC =
1022                         MI.getRegClassConstraint(OpIdx, TII, TRI))
1023                   MatchingReg = GetMatchingSubReg(RC);
1024                 else {
1025                   if (!isRewritableImplicitDef(MI.getOpcode()))
1026                     continue;
1027                   MatchingReg = GetMatchingSubReg(
1028                       TRI->getMinimalPhysRegClass(MOP.getReg()));
1029                 }
1030                 MOP.setReg(MatchingReg);
1031                 SeenDef = true;
1032               }
1033             }
1034           } else {
1035             for (unsigned OpIdx = 0; OpIdx < MI.getNumOperands(); ++OpIdx) {
1036               MachineOperand &MOP = MI.getOperand(OpIdx);
1037               if (MOP.isReg() && !MOP.isDebug() && MOP.getReg() &&
1038                   TRI->regsOverlap(MOP.getReg(), RegToRename)) {
1039                 assert((MOP.isImplicit() ||
1040                         (MOP.isRenamable() && !MOP.isEarlyClobber())) &&
1041                            "Need renamable operands");
1042                 Register MatchingReg;
1043                 if (const TargetRegisterClass *RC =
1044                         MI.getRegClassConstraint(OpIdx, TII, TRI))
1045                   MatchingReg = GetMatchingSubReg(RC);
1046                 else
1047                   MatchingReg = GetMatchingSubReg(
1048                       TRI->getMinimalPhysRegClass(MOP.getReg()));
1049                 assert(MatchingReg != AArch64::NoRegister &&
1050                        "Cannot find matching regs for renaming");
1051                 MOP.setReg(MatchingReg);
1052               }
1053             }
1054           }
1055           LLVM_DEBUG(dbgs() << "Renamed " << MI);
1056           return true;
1057         };
1058     forAllMIsUntilDef(MergeForward ? *I : *std::prev(Paired), RegToRename, TRI,
1059                       UINT32_MAX, UpdateMIs);
1060 
1061 #if !defined(NDEBUG)
1062     // For forward merging store:
1063     // Make sure the register used for renaming is not used between the
1064     // paired instructions. That would trash the content before the new
1065     // paired instruction.
1066     MCPhysReg RegToCheck = *RenameReg;
1067     // For backward merging load:
1068     // Make sure the register being renamed is not used between the
1069     // paired instructions. That would trash the content after the new
1070     // paired instruction.
1071     if (!MergeForward)
1072       RegToCheck = RegToRename;
1073     for (auto &MI :
1074          iterator_range<MachineInstrBundleIterator<llvm::MachineInstr>>(
1075              MergeForward ? std::next(I) : I,
1076              MergeForward ? std::next(Paired) : Paired))
1077       assert(all_of(MI.operands(),
1078                     [this, RegToCheck](const MachineOperand &MOP) {
1079                       return !MOP.isReg() || MOP.isDebug() || !MOP.getReg() ||
1080                              MOP.isUndef() ||
1081                              !TRI->regsOverlap(MOP.getReg(), RegToCheck);
1082                     }) &&
1083              "Rename register used between paired instruction, trashing the "
1084              "content");
1085 #endif
1086   }
1087 
1088   // Insert our new paired instruction after whichever of the paired
1089   // instructions MergeForward indicates.
1090   MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
1091   // MergeForward also determines from which instruction we copy the base
1092   // register operand, so that its flags stay compatible with the input code.
1093   const MachineOperand &BaseRegOp =
1094       MergeForward ? AArch64InstrInfo::getLdStBaseOp(*Paired)
1095                    : AArch64InstrInfo::getLdStBaseOp(*I);
1096 
1097   int Offset = AArch64InstrInfo::getLdStOffsetOp(*I).getImm();
1098   int PairedOffset = AArch64InstrInfo::getLdStOffsetOp(*Paired).getImm();
1099   bool PairedIsUnscaled = TII->hasUnscaledLdStOffset(Paired->getOpcode());
1100   if (IsUnscaled != PairedIsUnscaled) {
1101     // We're trying to pair instructions that differ in how they are scaled.  If
1102     // I is scaled then scale the offset of Paired accordingly.  Otherwise, do
1103     // the opposite (i.e., make Paired's offset unscaled).
1104     int MemSize = TII->getMemScale(*Paired);
1105     if (PairedIsUnscaled) {
1106       // If the unscaled offset isn't a multiple of the MemSize, we can't
1107       // pair the operations together.
1108       assert(!(PairedOffset % TII->getMemScale(*Paired)) &&
1109              "Offset should be a multiple of the stride!");
1110       PairedOffset /= MemSize;
1111     } else {
1112       PairedOffset *= MemSize;
1113     }
1114   }
1115 
1116   // Which register is Rt and which is Rt2 depends on the offset order.
1117   // However, for pre-indexed load/stores, Rt must be the register of the
1118   // pre-indexed instruction.
1119   MachineInstr *RtMI, *Rt2MI;
1120   if (Offset == PairedOffset + OffsetStride &&
1121       !AArch64InstrInfo::isPreLdSt(*I)) {
1122     RtMI = &*Paired;
1123     Rt2MI = &*I;
1124     // Here we swapped the assumption made for SExtIdx.
1125     // I.e., we turn ldp I, Paired into ldp Paired, I.
1126     // Update the index accordingly.
1127     if (SExtIdx != -1)
1128       SExtIdx = (SExtIdx + 1) % 2;
1129   } else {
1130     RtMI = &*I;
1131     Rt2MI = &*Paired;
1132   }
1133   int OffsetImm = AArch64InstrInfo::getLdStOffsetOp(*RtMI).getImm();
1134   // Scale the immediate offset, if necessary.
1135   if (TII->hasUnscaledLdStOffset(RtMI->getOpcode())) {
1136     assert(!(OffsetImm % TII->getMemScale(*RtMI)) &&
1137            "Unscaled offset cannot be scaled.");
1138     OffsetImm /= TII->getMemScale(*RtMI);
1139   }
1140 
1141   // Construct the new instruction.
1142   MachineInstrBuilder MIB;
1143   DebugLoc DL = I->getDebugLoc();
1144   MachineBasicBlock *MBB = I->getParent();
1145   MachineOperand RegOp0 = getLdStRegOp(*RtMI);
1146   MachineOperand RegOp1 = getLdStRegOp(*Rt2MI);
1147   MachineOperand &PairedRegOp = RtMI == &*Paired ? RegOp0 : RegOp1;
1148   // Kill flags may become invalid when moving stores for pairing.
1149   if (RegOp0.isUse()) {
1150     if (!MergeForward) {
1151       // Clear kill flags on store if moving upwards. Example:
1152       //   STRWui kill %w0, ...
1153       //   USE %w1
1154       //   STRWui kill %w1  ; need to clear kill flag when moving STRWui upwards
1155       // We are about to move the store of w1, so its kill flag may become
1156       // invalid; not the case for w0.
1157       // Since w1 is used between the stores, the kill flag on w1 is cleared
1158       // after merging.
1159       //   STPWi kill %w0, %w1, ...
1160       //   USE %w1
1161       for (auto It = std::next(I); It != Paired && PairedRegOp.isKill(); ++It)
1162         if (It->readsRegister(PairedRegOp.getReg(), TRI))
1163           PairedRegOp.setIsKill(false);
1164     } else {
1165       // Clear kill flags of the first store's register. Example:
1166       //   STRWui %w1, ...
1167       //   USE kill %w1   ; need to clear kill flag when moving STRWui downwards
1168       //   STRW %w0
1169       Register Reg = getLdStRegOp(*I).getReg();
1170       for (MachineInstr &MI : make_range(std::next(I), Paired))
1171         MI.clearRegisterKills(Reg, TRI);
1172     }
1173   }
1174 
1175   unsigned int MatchPairOpcode = getMatchingPairOpcode(Opc);
1176   MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(MatchPairOpcode));
1177 
1178   // Adds the pre-index operand for pre-indexed ld/st pairs.
1179   if (AArch64InstrInfo::isPreLdSt(*RtMI))
1180     MIB.addReg(BaseRegOp.getReg(), RegState::Define);
1181 
1182   MIB.add(RegOp0)
1183       .add(RegOp1)
1184       .add(BaseRegOp)
1185       .addImm(OffsetImm)
1186       .cloneMergedMemRefs({&*I, &*Paired})
1187       .setMIFlags(I->mergeFlagsWith(*Paired));
1188 
1189   (void)MIB;
1190 
1191   LLVM_DEBUG(
1192       dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
1193   LLVM_DEBUG(I->print(dbgs()));
1194   LLVM_DEBUG(dbgs() << "    ");
1195   LLVM_DEBUG(Paired->print(dbgs()));
1196   LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
1197   if (SExtIdx != -1) {
1198     // Generate the sign extension for the proper result of the ldp.
1199     // I.e., with X1, that would be:
1200     // %w1 = KILL %w1, implicit-def %x1
1201     // %x1 = SBFMXri killed %x1, 0, 31
1202     MachineOperand &DstMO = MIB->getOperand(SExtIdx);
1203     // Right now, DstMO has the extended register, since it comes from an
1204     // extended opcode.
1205     Register DstRegX = DstMO.getReg();
1206     // Get the W variant of that register.
1207     Register DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
1208     // Update the result of LDP to use the W instead of the X variant.
1209     DstMO.setReg(DstRegW);
1210     LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
1211     LLVM_DEBUG(dbgs() << "\n");
1212     // Make the machine verifier happy by providing a definition for
1213     // the X register.
1214     // Insert this definition right after the generated LDP, i.e., before
1215     // InsertionPoint.
1216     MachineInstrBuilder MIBKill =
1217         BuildMI(*MBB, InsertionPoint, DL, TII->get(TargetOpcode::KILL), DstRegW)
1218             .addReg(DstRegW)
1219             .addReg(DstRegX, RegState::Define);
1220     MIBKill->getOperand(2).setImplicit();
1221     // Create the sign extension.
1222     MachineInstrBuilder MIBSXTW =
1223         BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::SBFMXri), DstRegX)
1224             .addReg(DstRegX)
1225             .addImm(0)
1226             .addImm(31);
1227     (void)MIBSXTW;
1228     LLVM_DEBUG(dbgs() << "  Extend operand:\n    ");
1229     LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
1230   } else {
1231     LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
1232   }
1233   LLVM_DEBUG(dbgs() << "\n");
1234 
1235   if (MergeForward)
1236     for (const MachineOperand &MOP : phys_regs_and_masks(*I))
1237       if (MOP.isReg() && MOP.isKill())
1238         DefinedInBB.addReg(MOP.getReg());
1239 
1240   // Erase the old instructions.
1241   I->eraseFromParent();
1242   Paired->eraseFromParent();
1243 
1244   return NextI;
1245 }
1246 
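// For example, a load that reads back a value just stored
//   str w0, [sp, #4] ... ldr w1, [sp, #4]  ==>  str w0, [sp, #4] ... mov w1, w0
// is replaced by a register move (an ORRWrs/ORRXrs from the zero register);
// if the load is narrower than the store, a bitfield extract (AND/UBFM) of
// the stored register is emitted instead.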
1247 MachineBasicBlock::iterator
1248 AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
1249                                           MachineBasicBlock::iterator StoreI) {
1250   MachineBasicBlock::iterator NextI =
1251       next_nodbg(LoadI, LoadI->getParent()->end());
1252 
1253   int LoadSize = TII->getMemScale(*LoadI);
1254   int StoreSize = TII->getMemScale(*StoreI);
1255   Register LdRt = getLdStRegOp(*LoadI).getReg();
1256   const MachineOperand &StMO = getLdStRegOp(*StoreI);
1257   Register StRt = getLdStRegOp(*StoreI).getReg();
1258   bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);
1259 
1260   assert((IsStoreXReg ||
1261           TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
1262          "Unexpected RegClass");
1263 
1264   MachineInstr *BitExtMI;
1265   if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
1266     // Remove the load if its destination register is the same as the
1267     // register holding the stored value.
1268     if (StRt == LdRt && LoadSize == 8) {
1269       for (MachineInstr &MI : make_range(StoreI->getIterator(),
1270                                          LoadI->getIterator())) {
1271         if (MI.killsRegister(StRt, TRI)) {
1272           MI.clearRegisterKills(StRt, TRI);
1273           break;
1274         }
1275       }
1276       LLVM_DEBUG(dbgs() << "Remove load instruction:\n    ");
1277       LLVM_DEBUG(LoadI->print(dbgs()));
1278       LLVM_DEBUG(dbgs() << "\n");
1279       LoadI->eraseFromParent();
1280       return NextI;
1281     }
1282     // Replace the load with a mov if the load and store are the same size.
1283     BitExtMI =
1284         BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
1285                 TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
1286             .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
1287             .add(StMO)
1288             .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
1289             .setMIFlags(LoadI->getFlags());
1290   } else {
1291     // FIXME: Currently we disable this transformation in big-endian targets as
1292     // performance and correctness are verified only in little-endian.
1293     if (!Subtarget->isLittleEndian())
1294       return NextI;
1295     bool IsUnscaled = TII->hasUnscaledLdStOffset(*LoadI);
1296     assert(IsUnscaled == TII->hasUnscaledLdStOffset(*StoreI) &&
1297            "Unsupported ld/st match");
1298     assert(LoadSize <= StoreSize && "Invalid load size");
1299     int UnscaledLdOffset =
1300         IsUnscaled
1301             ? AArch64InstrInfo::getLdStOffsetOp(*LoadI).getImm()
1302             : AArch64InstrInfo::getLdStOffsetOp(*LoadI).getImm() * LoadSize;
1303     int UnscaledStOffset =
1304         IsUnscaled
1305             ? AArch64InstrInfo::getLdStOffsetOp(*StoreI).getImm()
1306             : AArch64InstrInfo::getLdStOffsetOp(*StoreI).getImm() * StoreSize;
1307     int Width = LoadSize * 8;
1308     Register DestReg =
1309         IsStoreXReg ? Register(TRI->getMatchingSuperReg(
1310                           LdRt, AArch64::sub_32, &AArch64::GPR64RegClass))
1311                     : LdRt;
1312 
1313     assert((UnscaledLdOffset >= UnscaledStOffset &&
1314             (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
1315            "Invalid offset");
1316 
1317     int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
1318     int Imms = Immr + Width - 1;
1319     if (UnscaledLdOffset == UnscaledStOffset) {
1320       uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
1321                                 | ((Immr) << 6)               // immr
1322                                 | ((Imms) << 0)               // imms
1323           ;
1324 
1325       BitExtMI =
1326           BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
1327                   TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
1328                   DestReg)
1329               .add(StMO)
1330               .addImm(AndMaskEncoded)
1331               .setMIFlags(LoadI->getFlags());
1332     } else if (IsStoreXReg && Imms == 31) {
1333       // Use the 32 bit variant of UBFM if it's the LSR alias of the
1334       // instruction.
1335       assert(Immr <= Imms && "Expected LSR alias of UBFM");
1336       BitExtMI = BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
1337                          TII->get(AArch64::UBFMWri),
1338                          TRI->getSubReg(DestReg, AArch64::sub_32))
1339                      .addReg(TRI->getSubReg(StRt, AArch64::sub_32))
1340                      .addImm(Immr)
1341                      .addImm(Imms)
1342                      .setMIFlags(LoadI->getFlags());
1343     } else {
1344       BitExtMI =
1345           BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
1346                   TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
1347                   DestReg)
1348               .add(StMO)
1349               .addImm(Immr)
1350               .addImm(Imms)
1351               .setMIFlags(LoadI->getFlags());
1352     }
1353   }
1354 
1355   // Clear kill flags between store and load.
1356   for (MachineInstr &MI : make_range(StoreI->getIterator(),
1357                                      BitExtMI->getIterator()))
1358     if (MI.killsRegister(StRt, TRI)) {
1359       MI.clearRegisterKills(StRt, TRI);
1360       break;
1361     }
1362 
1363   LLVM_DEBUG(dbgs() << "Promoting load by replacing:\n    ");
1364   LLVM_DEBUG(StoreI->print(dbgs()));
1365   LLVM_DEBUG(dbgs() << "    ");
1366   LLVM_DEBUG(LoadI->print(dbgs()));
1367   LLVM_DEBUG(dbgs() << "  with instructions:\n    ");
1368   LLVM_DEBUG(StoreI->print(dbgs()));
1369   LLVM_DEBUG(dbgs() << "    ");
1370   LLVM_DEBUG((BitExtMI)->print(dbgs()));
1371   LLVM_DEBUG(dbgs() << "\n");
1372 
1373   // Erase the old instructions.
1374   LoadI->eraseFromParent();
1375   return NextI;
1376 }
1377 
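// The paired-instruction immediate is a signed 7-bit field in units of the
// element size, so the scaled offset must fit in [-64, 63].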
1378 static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
1379   // Convert the byte offset used by unscaled instructions into an "element"
1380   // offset used by the scaled pair load/store instructions.
1381   if (IsUnscaled) {
1382     // If the byte-offset isn't a multiple of the stride, there's no point
1383     // trying to match it.
1384     if (Offset % OffsetStride)
1385       return false;
1386     Offset /= OffsetStride;
1387   }
1388   return Offset <= 63 && Offset >= -64;
1389 }
1390 
1391 // Do alignment, specialized to a power of 2 and for signed ints,
1392 // avoiding having to do a C-style cast from uint64_t to int when
1393 // using alignTo from include/llvm/Support/MathExtras.h.
1394 // FIXME: Move this function to include/MathExtras.h?
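// For example, alignTo(10, 8) == 16 and alignTo(-10, 8) == -8 (rounding
// towards positive infinity).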
1395 static int alignTo(int Num, int PowOf2) {
1396   return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
1397 }
1398 
1399 static bool mayAlias(MachineInstr &MIa,
1400                      SmallVectorImpl<MachineInstr *> &MemInsns,
1401                      AliasAnalysis *AA) {
1402   for (MachineInstr *MIb : MemInsns) {
1403     if (MIa.mayAlias(AA, *MIb, /*UseTBAA*/ false)) {
1404       LLVM_DEBUG(dbgs() << "Aliasing with: "; MIb->dump());
1405       return true;
1406     }
1407   }
1408 
1409   LLVM_DEBUG(dbgs() << "No aliases found\n");
1410   return false;
1411 }
1412 
1413 bool AArch64LoadStoreOpt::findMatchingStore(
1414     MachineBasicBlock::iterator I, unsigned Limit,
1415     MachineBasicBlock::iterator &StoreI) {
1416   MachineBasicBlock::iterator B = I->getParent()->begin();
1417   MachineBasicBlock::iterator MBBI = I;
1418   MachineInstr &LoadMI = *I;
1419   Register BaseReg = AArch64InstrInfo::getLdStBaseOp(LoadMI).getReg();
1420 
1421   // If the load is the first instruction in the block, there's obviously
1422   // not any matching store.
1423   if (MBBI == B)
1424     return false;
1425 
1426   // Track which register units have been modified and used between the first
1427   // insn and the second insn.
1428   ModifiedRegUnits.clear();
1429   UsedRegUnits.clear();
1430 
1431   unsigned Count = 0;
1432   do {
1433     MBBI = prev_nodbg(MBBI, B);
1434     MachineInstr &MI = *MBBI;
1435 
1436     // Don't count transient instructions towards the search limit since there
1437     // may be different numbers of them if e.g. debug information is present.
1438     if (!MI.isTransient())
1439       ++Count;
1440 
1441     // If the load instruction reads directly from the address to which the
1442     // store instruction writes and the stored value is not modified, we can
1443     // promote the load. Since we do not handle stores with pre-/post-index,
1444     // it's unnecessary to check if BaseReg is modified by the store itself.
1445     // Also, we can't handle stores without an immediate offset operand,
1446     // since the operand might instead be the address of a global variable.
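    // A sketch of the pattern this enables (registers chosen for exposition):
    //   str w1, [x0, #4]
    //   ...                ; nothing clobbers w1 or x0, no aliasing store
    //   ldr w2, [x0, #4]
    // Here the load can be promoted to a copy of w1 into w2.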
1447     if (MI.mayStore() && isMatchingStore(LoadMI, MI) &&
1448         BaseReg == AArch64InstrInfo::getLdStBaseOp(MI).getReg() &&
1449         AArch64InstrInfo::getLdStOffsetOp(MI).isImm() &&
1450         isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
1451         ModifiedRegUnits.available(getLdStRegOp(MI).getReg())) {
1452       StoreI = MBBI;
1453       return true;
1454     }
1455 
1456     if (MI.isCall())
1457       return false;
1458 
1459     // Update modified / uses register units.
1460     LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);
1461 
1462     // Otherwise, if the base register is modified, we have no match, so
1463     // return early.
1464     if (!ModifiedRegUnits.available(BaseReg))
1465       return false;
1466 
1467     // If we encounter a store aliased with the load, return early.
1468     if (MI.mayStore() && LoadMI.mayAlias(AA, MI, /*UseTBAA*/ false))
1469       return false;
1470   } while (MBBI != B && Count < Limit);
1471   return false;
1472 }
1473 
1474 static bool needsWinCFI(const MachineFunction *MF) {
1475   return MF->getTarget().getMCAsmInfo()->usesWindowsCFI() &&
1476          MF->getFunction().needsUnwindTableEntry();
1477 }
1478 
1479 // Returns true if FirstMI and MI are candidates for merging or pairing.
1480 // Otherwise, returns false.
1481 static bool areCandidatesToMergeOrPair(MachineInstr &FirstMI, MachineInstr &MI,
1482                                        LdStPairFlags &Flags,
1483                                        const AArch64InstrInfo *TII) {
1484   // If this is volatile or if pairing is suppressed, not a candidate.
1485   if (MI.hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
1486     return false;
1487 
1488   // We should have already checked FirstMI for pair suppression and volatility.
1489   assert(!FirstMI.hasOrderedMemoryRef() &&
1490          !TII->isLdStPairSuppressed(FirstMI) &&
1491          "FirstMI shouldn't get here if either of these checks is true.");
1492 
1493   if (needsWinCFI(MI.getMF()) && (MI.getFlag(MachineInstr::FrameSetup) ||
1494                                   MI.getFlag(MachineInstr::FrameDestroy)))
1495     return false;
1496 
1497   unsigned OpcA = FirstMI.getOpcode();
1498   unsigned OpcB = MI.getOpcode();
1499 
1500   // Opcodes match: If the opcodes are pre ld/st there is nothing more to check.
1501   if (OpcA == OpcB)
1502     return !AArch64InstrInfo::isPreLdSt(FirstMI);
1503 
1504   // Two pre ld/st of different opcodes cannot be merged either
1505   if (AArch64InstrInfo::isPreLdSt(FirstMI) && AArch64InstrInfo::isPreLdSt(MI))
1506     return false;
1507 
1508   // Try to match a sign-extended load/store with a zero-extended load/store.
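  // For example, an LDRSWui can be considered together with an LDRWui, since
  // both map to the same non-sign-extending opcode (LDRWui).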
1509   bool IsValidLdStrOpc, PairIsValidLdStrOpc;
1510   unsigned NonSExtOpc = getMatchingNonSExtOpcode(OpcA, &IsValidLdStrOpc);
1511   assert(IsValidLdStrOpc &&
1512          "Given Opc should be a Load or Store with an immediate");
1513   // OpcA will be the first instruction in the pair.
1514   if (NonSExtOpc == getMatchingNonSExtOpcode(OpcB, &PairIsValidLdStrOpc)) {
1515     Flags.setSExtIdx(NonSExtOpc == (unsigned)OpcA ? 1 : 0);
1516     return true;
1517   }
1518 
1519   // If the second instruction isn't even a mergable/pairable load/store, bail
1520   // out.
1521   if (!PairIsValidLdStrOpc)
1522     return false;
1523 
1524   // FIXME: We don't support merging narrow stores with mixed scaled/unscaled
1525   // offsets.
1526   if (isNarrowStore(OpcA) || isNarrowStore(OpcB))
1527     return false;
1528 
1529   // The STR<S,D,Q,W,X>pre - STR<S,D,Q,W,X>ui and
1530   // LDR<S,D,Q,W,X,SW>pre-LDR<S,D,Q,W,X,SW>ui
1531   // are candidate pairs that can be merged.
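  // An illustrative case (registers chosen for exposition):
  //   str x0, [x1, #16]!
  //   str x2, [x1, #8]
  // can be merged into
  //   stp x0, x2, [x1, #16]!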
1532   if (isPreLdStPairCandidate(FirstMI, MI))
1533     return true;
1534 
1535   // Try to match an unscaled load/store with a scaled load/store.
1536   return TII->hasUnscaledLdStOffset(OpcA) != TII->hasUnscaledLdStOffset(OpcB) &&
1537          getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);
1538 
1539   // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
1540 }
1541 
1542 static bool canRenameMOP(const MachineOperand &MOP,
1543                          const TargetRegisterInfo *TRI) {
1544   if (MOP.isReg()) {
1545     auto *RegClass = TRI->getMinimalPhysRegClass(MOP.getReg());
1546     // Renaming registers with multiple disjunct sub-registers (e.g. the
1547     // result of a LD3) means that all sub-registers are renamed, potentially
1548     // impacting other instructions we did not check. Bail out.
1549     // Note that this relies on the structure of the AArch64 register file. In
1550     // particular, a subregister cannot be written without overwriting the
1551     // whole register.
1552     if (RegClass->HasDisjunctSubRegs && RegClass->CoveredBySubRegs &&
1553         (TRI->getSubRegisterClass(RegClass, AArch64::dsub0) ||
1554          TRI->getSubRegisterClass(RegClass, AArch64::qsub0) ||
1555          TRI->getSubRegisterClass(RegClass, AArch64::zsub0))) {
1556       LLVM_DEBUG(
1557           dbgs()
1558           << "  Cannot rename operands with multiple disjunct subregisters ("
1559           << MOP << ")\n");
1560       return false;
1561     }
1562 
1563     // We cannot rename arbitrary implicit-defs, the specific rule to rewrite
1564     // them must be known. For example, in ORRWrs the implicit-def
1565     // corresponds to the result register.
1566     if (MOP.isImplicit() && MOP.isDef()) {
1567       if (!isRewritableImplicitDef(MOP.getParent()->getOpcode()))
1568         return false;
1569       return TRI->isSuperOrSubRegisterEq(
1570           MOP.getParent()->getOperand(0).getReg(), MOP.getReg());
1571     }
1572   }
1573   return MOP.isImplicit() ||
1574          (MOP.isRenamable() && !MOP.isEarlyClobber() && !MOP.isTied());
1575 }
1576 
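// A sketch of the store case this enables (w9 stands for whatever rename
// register is found to be free later):
//   mov w1, #13
//   str w1, [sp, #8]      ; FirstMI, w1 killed here
//   mov w1, #27
//   str w1, [sp, #12]
// Renaming w1 to w9 from its def down to the first store allows the two
// stores to be paired as "stp w9, w1, [sp, #8]".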
1577 static bool
1578 canRenameUpToDef(MachineInstr &FirstMI, LiveRegUnits &UsedInBetween,
1579                  SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
1580                  const TargetRegisterInfo *TRI) {
1581   if (!FirstMI.mayStore())
1582     return false;
1583 
1584   // Check if we can find an unused register which we can use to rename
1585   // the register used by the first load/store.
1586 
1587   auto RegToRename = getLdStRegOp(FirstMI).getReg();
1588   // For now, we only rename if the store operand gets killed at the store.
1589   if (!getLdStRegOp(FirstMI).isKill() &&
1590       !any_of(FirstMI.operands(),
1591               [TRI, RegToRename](const MachineOperand &MOP) {
1592                 return MOP.isReg() && !MOP.isDebug() && MOP.getReg() &&
1593                        MOP.isImplicit() && MOP.isKill() &&
1594                        TRI->regsOverlap(RegToRename, MOP.getReg());
1595               })) {
1596     LLVM_DEBUG(dbgs() << "  Operand not killed at " << FirstMI);
1597     return false;
1598   }
1599 
1600   bool FoundDef = false;
1601 
1602   // For each instruction between FirstMI and the previous def for RegToRename,
1603   // we
1604   // * check if we can rename RegToRename in this instruction
1605   // * collect the registers used and required register classes for RegToRename.
1606   std::function<bool(MachineInstr &, bool)> CheckMIs = [&](MachineInstr &MI,
1607                                                            bool IsDef) {
1608     LLVM_DEBUG(dbgs() << "Checking " << MI);
1609     // Currently we do not try to rename across frame-setup instructions.
1610     if (MI.getFlag(MachineInstr::FrameSetup)) {
1611       LLVM_DEBUG(dbgs() << "  Cannot rename framesetup instructions "
1612                         << "currently\n");
1613       return false;
1614     }
1615 
1616     UsedInBetween.accumulate(MI);
1617 
1618     // For a definition, check that we can rename the definition and exit the
1619     // loop.
1620     FoundDef = IsDef;
1621 
1622     // For defs, check if we can rename the first def of RegToRename.
1623     if (FoundDef) {
1624       // For some pseudo instructions, we might not generate code in the end
1625       // (e.g. KILL) and we would end up without a correct def for the rename
1626       // register.
1627       // TODO: This might be overly conservative and we could handle those cases
1628       // in multiple ways:
1629       //       1. Insert an extra copy, to materialize the def.
1630       //       2. Skip pseudo-defs until we find a non-pseudo def.
1631       if (MI.isPseudo()) {
1632         LLVM_DEBUG(dbgs() << "  Cannot rename pseudo/bundle instruction\n");
1633         return false;
1634       }
1635 
1636       for (auto &MOP : MI.operands()) {
1637         if (!MOP.isReg() || !MOP.isDef() || MOP.isDebug() || !MOP.getReg() ||
1638             !TRI->regsOverlap(MOP.getReg(), RegToRename))
1639           continue;
1640         if (!canRenameMOP(MOP, TRI)) {
1641           LLVM_DEBUG(dbgs() << "  Cannot rename " << MOP << " in " << MI);
1642           return false;
1643         }
1644         RequiredClasses.insert(TRI->getMinimalPhysRegClass(MOP.getReg()));
1645       }
1646       return true;
1647     } else {
1648       for (auto &MOP : MI.operands()) {
1649         if (!MOP.isReg() || MOP.isDebug() || !MOP.getReg() ||
1650             !TRI->regsOverlap(MOP.getReg(), RegToRename))
1651           continue;
1652 
1653         if (!canRenameMOP(MOP, TRI)) {
1654           LLVM_DEBUG(dbgs() << "  Cannot rename " << MOP << " in " << MI);
1655           return false;
1656         }
1657         RequiredClasses.insert(TRI->getMinimalPhysRegClass(MOP.getReg()));
1658       }
1659     }
1660     return true;
1661   };
1662 
1663   if (!forAllMIsUntilDef(FirstMI, RegToRename, TRI, LdStLimit, CheckMIs))
1664     return false;
1665 
1666   if (!FoundDef) {
1667     LLVM_DEBUG(dbgs() << "  Did not find definition for register in BB\n");
1668     return false;
1669   }
1670   return true;
1671 }
1672 
1673 // We want to merge the second load into the first by rewriting the usages of
1674 // the same reg between first (incl.) and second (excl.). We don't need to care
1675 // about any insns before FirstLoad or after SecondLoad.
1676 // 1. The second load writes a new value into the same reg.
1677 //    - The renaming therefore cannot affect any later use of the reg.
1678 //    - The second load always trashes the value written by the first load,
1679 //      which means the reg must be killed before the second load.
1680 // 2. The first load must be a def for the same reg so we don't need to look
1681 //    into anything before it.
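// A sketch of the load case (x9 stands for whatever rename register is found):
//   ldr x1, [x2]          ; FirstLoad
//   <uses of x1>
//   ldr x1, [x2, #8]      ; SecondLoad, overwrites x1
// Renaming the first load and its uses to x9 allows the loads to be paired as
// "ldp x9, x1, [x2]" at the position of the first load.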
1682 static bool canRenameUntilSecondLoad(
1683     MachineInstr &FirstLoad, MachineInstr &SecondLoad,
1684     LiveRegUnits &UsedInBetween,
1685     SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
1686     const TargetRegisterInfo *TRI) {
1687   if (FirstLoad.isPseudo())
1688     return false;
1689 
1690   UsedInBetween.accumulate(FirstLoad);
1691   auto RegToRename = getLdStRegOp(FirstLoad).getReg();
1692   bool Success = std::all_of(
1693       FirstLoad.getIterator(), SecondLoad.getIterator(),
1694       [&](MachineInstr &MI) {
1695         LLVM_DEBUG(dbgs() << "Checking " << MI);
1696         // Currently we do not try to rename across frame-setup instructions.
1697         if (MI.getFlag(MachineInstr::FrameSetup)) {
1698           LLVM_DEBUG(dbgs() << "  Cannot rename framesetup instructions "
1699                             << "currently\n");
1700           return false;
1701         }
1702 
1703         for (auto &MOP : MI.operands()) {
1704           if (!MOP.isReg() || MOP.isDebug() || !MOP.getReg() ||
1705               !TRI->regsOverlap(MOP.getReg(), RegToRename))
1706             continue;
1707           if (!canRenameMOP(MOP, TRI)) {
1708             LLVM_DEBUG(dbgs() << "  Cannot rename " << MOP << " in " << MI);
1709             return false;
1710           }
1711           RequiredClasses.insert(TRI->getMinimalPhysRegClass(MOP.getReg()));
1712         }
1713 
1714         return true;
1715       });
1716   return Success;
1717 }
1718 
1719 // Check if we can find a physical register for renaming \p Reg. This register
1720 // must:
1721 // * not be defined already in \p DefinedInBB; DefinedInBB must contain all
1722 //   defined registers up to the point where the renamed register will be used,
1723 // * not be used in \p UsedInBetween; UsedInBetween must contain all accessed
1724 //   registers in the range the rename register will be used,
1725 // * be available in all used register classes (checked using RequiredClasses).
1726 static std::optional<MCPhysReg> tryToFindRegisterToRename(
1727     const MachineFunction &MF, Register Reg, LiveRegUnits &DefinedInBB,
1728     LiveRegUnits &UsedInBetween,
1729     SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
1730     const TargetRegisterInfo *TRI) {
1731   const MachineRegisterInfo &RegInfo = MF.getRegInfo();
1732 
1733   // Checks if any sub- or super-register of PR is callee saved.
1734   auto AnySubOrSuperRegCalleePreserved = [&MF, TRI](MCPhysReg PR) {
1735     return any_of(TRI->sub_and_superregs_inclusive(PR),
1736                   [&MF, TRI](MCPhysReg SubOrSuper) {
1737                     return TRI->isCalleeSavedPhysReg(SubOrSuper, MF);
1738                   });
1739   };
1740 
1741   // Check if PR or one of its sub- or super-registers can be used for all
1742   // required register classes.
1743   auto CanBeUsedForAllClasses = [&RequiredClasses, TRI](MCPhysReg PR) {
1744     return all_of(RequiredClasses, [PR, TRI](const TargetRegisterClass *C) {
1745       return any_of(
1746           TRI->sub_and_superregs_inclusive(PR),
1747           [C](MCPhysReg SubOrSuper) { return C->contains(SubOrSuper); });
1748     });
1749   };
1750 
1751   auto *RegClass = TRI->getMinimalPhysRegClass(Reg);
1752   for (const MCPhysReg &PR : *RegClass) {
1753     if (DefinedInBB.available(PR) && UsedInBetween.available(PR) &&
1754         !RegInfo.isReserved(PR) && !AnySubOrSuperRegCalleePreserved(PR) &&
1755         CanBeUsedForAllClasses(PR)) {
1756       DefinedInBB.addReg(PR);
1757       LLVM_DEBUG(dbgs() << "Found rename register " << printReg(PR, TRI)
1758                         << "\n");
1759       return {PR};
1760     }
1761   }
1762   LLVM_DEBUG(dbgs() << "No rename register found from "
1763                     << TRI->getRegClassName(RegClass) << "\n");
1764   return std::nullopt;
1765 }
1766 
1767 // For store pairs: returns a register from FirstMI to the beginning of the
1768 // block that can be renamed.
1769 // For load pairs: returns a register from FirstMI to MI that can be renamed.
1770 static std::optional<MCPhysReg> findRenameRegForSameLdStRegPair(
1771     std::optional<bool> MaybeCanRename, MachineInstr &FirstMI, MachineInstr &MI,
1772     Register Reg, LiveRegUnits &DefinedInBB, LiveRegUnits &UsedInBetween,
1773     SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
1774     const TargetRegisterInfo *TRI) {
1775   std::optional<MCPhysReg> RenameReg;
1776   if (!DebugCounter::shouldExecute(RegRenamingCounter))
1777     return RenameReg;
1778 
1779   auto *RegClass = TRI->getMinimalPhysRegClass(getLdStRegOp(FirstMI).getReg());
1780   MachineFunction &MF = *FirstMI.getParent()->getParent();
1781   if (!RegClass || !MF.getRegInfo().tracksLiveness())
1782     return RenameReg;
1783 
1784   const bool IsLoad = FirstMI.mayLoad();
1785 
1786   if (!MaybeCanRename) {
1787     if (IsLoad)
1788       MaybeCanRename = {canRenameUntilSecondLoad(FirstMI, MI, UsedInBetween,
1789                                                  RequiredClasses, TRI)};
1790     else
1791       MaybeCanRename = {
1792           canRenameUpToDef(FirstMI, UsedInBetween, RequiredClasses, TRI)};
1793   }
1794 
1795   if (*MaybeCanRename) {
1796     RenameReg = tryToFindRegisterToRename(MF, Reg, DefinedInBB, UsedInBetween,
1797                                           RequiredClasses, TRI);
1798   }
1799   return RenameReg;
1800 }
1801 
1802 /// Scan the instructions looking for a load/store that can be combined with the
1803 /// current instruction into a wider equivalent or a load/store pair.
1804 MachineBasicBlock::iterator
1805 AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
1806                                       LdStPairFlags &Flags, unsigned Limit,
1807                                       bool FindNarrowMerge) {
1808   MachineBasicBlock::iterator E = I->getParent()->end();
1809   MachineBasicBlock::iterator MBBI = I;
1810   MachineBasicBlock::iterator MBBIWithRenameReg;
1811   MachineInstr &FirstMI = *I;
1812   MBBI = next_nodbg(MBBI, E);
1813 
1814   bool MayLoad = FirstMI.mayLoad();
1815   bool IsUnscaled = TII->hasUnscaledLdStOffset(FirstMI);
1816   Register Reg = getLdStRegOp(FirstMI).getReg();
1817   Register BaseReg = AArch64InstrInfo::getLdStBaseOp(FirstMI).getReg();
1818   int Offset = AArch64InstrInfo::getLdStOffsetOp(FirstMI).getImm();
1819   int OffsetStride = IsUnscaled ? TII->getMemScale(FirstMI) : 1;
1820   bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI);
1821 
1822   std::optional<bool> MaybeCanRename;
1823   if (!EnableRenaming)
1824     MaybeCanRename = {false};
1825 
1826   SmallPtrSet<const TargetRegisterClass *, 5> RequiredClasses;
1827   LiveRegUnits UsedInBetween;
1828   UsedInBetween.init(*TRI);
1829 
1830   Flags.clearRenameReg();
1831 
1832   // Track which register units have been modified and used between the first
1833   // insn (inclusive) and the second insn.
1834   ModifiedRegUnits.clear();
1835   UsedRegUnits.clear();
1836 
1837   // Remember any instructions that read/write memory between FirstMI and MI.
1838   SmallVector<MachineInstr *, 4> MemInsns;
1839 
1840   LLVM_DEBUG(dbgs() << "Find match for: "; FirstMI.dump());
1841   for (unsigned Count = 0; MBBI != E && Count < Limit;
1842        MBBI = next_nodbg(MBBI, E)) {
1843     MachineInstr &MI = *MBBI;
1844     LLVM_DEBUG(dbgs() << "Analysing 2nd insn: "; MI.dump());
1845 
1846     UsedInBetween.accumulate(MI);
1847 
1848     // Don't count transient instructions towards the search limit since there
1849     // may be different numbers of them if e.g. debug information is present.
1850     if (!MI.isTransient())
1851       ++Count;
1852 
1853     Flags.setSExtIdx(-1);
1854     if (areCandidatesToMergeOrPair(FirstMI, MI, Flags, TII) &&
1855         AArch64InstrInfo::getLdStOffsetOp(MI).isImm()) {
1856       assert(MI.mayLoadOrStore() && "Expected memory operation.");
1857       // If we've found another instruction with the same opcode, check to see
1858       // if the base and offset are compatible with our starting instruction.
1859       // These instructions all have scaled immediate operands, so we just
1860       // check for +1/-1. Make sure to check the new instruction offset is
1861       // actually an immediate and not a symbolic reference destined for
1862       // a relocation.
1863       Register MIBaseReg = AArch64InstrInfo::getLdStBaseOp(MI).getReg();
1864       int MIOffset = AArch64InstrInfo::getLdStOffsetOp(MI).getImm();
1865       bool MIIsUnscaled = TII->hasUnscaledLdStOffset(MI);
1866       if (IsUnscaled != MIIsUnscaled) {
1867         // We're trying to pair instructions that differ in how they are scaled.
1868         // If FirstMI is scaled then scale the offset of MI accordingly.
1869         // Otherwise, do the opposite (i.e., make MI's offset unscaled).
1870         int MemSize = TII->getMemScale(MI);
1871         if (MIIsUnscaled) {
1872           // If the unscaled offset isn't a multiple of the MemSize, we can't
1873           // pair the operations together: bail and keep looking.
1874           if (MIOffset % MemSize) {
1875             LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
1876                                               UsedRegUnits, TRI);
1877             MemInsns.push_back(&MI);
1878             continue;
1879           }
1880           MIOffset /= MemSize;
1881         } else {
1882           MIOffset *= MemSize;
1883         }
1884       }
1885 
1886       bool IsPreLdSt = isPreLdStPairCandidate(FirstMI, MI);
1887 
1888       if (BaseReg == MIBaseReg) {
1889         // If the offset of the second ld/st is not equal to the size of the
1890         // destination register, it can't be paired with a pre-index ld/st
1891         // pair. Additionally, if the base reg is used or modified, the operations
1892         // can't be paired: bail and keep looking.
1893         if (IsPreLdSt) {
1894           bool IsOutOfBounds = MIOffset != TII->getMemScale(MI);
1895           bool IsBaseRegUsed = !UsedRegUnits.available(
1896               AArch64InstrInfo::getLdStBaseOp(MI).getReg());
1897           bool IsBaseRegModified = !ModifiedRegUnits.available(
1898               AArch64InstrInfo::getLdStBaseOp(MI).getReg());
1899           // If the stored value and the address of the second instruction are
1900           // the same, the store needs to use the updated register and
1901           // therefore must not be folded.
1902           bool IsMIRegTheSame =
1903               TRI->regsOverlap(getLdStRegOp(MI).getReg(),
1904                                AArch64InstrInfo::getLdStBaseOp(MI).getReg());
1905           if (IsOutOfBounds || IsBaseRegUsed || IsBaseRegModified ||
1906               IsMIRegTheSame) {
1907             LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
1908                                               UsedRegUnits, TRI);
1909             MemInsns.push_back(&MI);
1910             continue;
1911           }
1912         } else {
1913           if ((Offset != MIOffset + OffsetStride) &&
1914               (Offset + OffsetStride != MIOffset)) {
1915             LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
1916                                               UsedRegUnits, TRI);
1917             MemInsns.push_back(&MI);
1918             continue;
1919           }
1920         }
1921 
1922         int MinOffset = Offset < MIOffset ? Offset : MIOffset;
1923         if (FindNarrowMerge) {
1924           // If the alignment requirements of the scaled wide load/store
1925           // instruction can't express the offset of the scaled narrow input,
1926           // bail and keep looking. For promotable zero stores, allow only when
1927           // the stored value is the same (i.e., WZR).
1928           if ((!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) ||
1929               (IsPromotableZeroStore && Reg != getLdStRegOp(MI).getReg())) {
1930             LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
1931                                               UsedRegUnits, TRI);
1932             MemInsns.push_back(&MI);
1933             continue;
1934           }
1935         } else {
1936           // Pairwise instructions have a 7-bit signed offset field. Single
1937           // insns have a 12-bit unsigned offset field.  If the resultant
1938           // immediate offset of merging these instructions is out of range for
1939           // a pairwise instruction, bail and keep looking.
1940           if (!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
1941             LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
1942                                               UsedRegUnits, TRI);
1943             MemInsns.push_back(&MI);
1944             LLVM_DEBUG(dbgs() << "Offset doesn't fit in immediate, "
1945                               << "keep looking.\n");
1946             continue;
1947           }
1948           // If the alignment requirements of the paired (scaled) instruction
1949           // can't express the offset of the unscaled input, bail and keep
1950           // looking.
1951           if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
1952             LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
1953                                               UsedRegUnits, TRI);
1954             MemInsns.push_back(&MI);
1955             LLVM_DEBUG(dbgs()
1956                        << "Offset doesn't fit due to alignment requirements, "
1957                        << "keep looking.\n");
1958             continue;
1959           }
1960         }
1961 
1962         // If the BaseReg has been modified, then we cannot do the optimization.
1963         // For example, in the following pattern
1964         //   ldr x1 [x2]
1965         //   ldr x2 [x3]
1966         //   ldr x4 [x2, #8],
1967         // the first and third ldr cannot be converted to ldp x1, x4, [x2]
1968         if (!ModifiedRegUnits.available(BaseReg))
1969           return E;
1970 
1971         const bool SameLoadReg = MayLoad && TRI->isSuperOrSubRegisterEq(
1972                                                 Reg, getLdStRegOp(MI).getReg());
1973 
1974         // If the Rt of the second instruction (destination register of the
1975         // load) was not modified or used between the two instructions and none
1976         // of the instructions between the second and first alias with the
1977         // second, we can combine the second into the first.
1978         bool RtNotModified =
1979             ModifiedRegUnits.available(getLdStRegOp(MI).getReg());
1980         bool RtNotUsed = !(MI.mayLoad() && !SameLoadReg &&
1981                            !UsedRegUnits.available(getLdStRegOp(MI).getReg()));
1982 
1983         LLVM_DEBUG(dbgs() << "Checking, can combine 2nd into 1st insn:\n"
1984                           << "Reg '" << getLdStRegOp(MI) << "' not modified: "
1985                           << (RtNotModified ? "true" : "false") << "\n"
1986                           << "Reg '" << getLdStRegOp(MI) << "' not used: "
1987                           << (RtNotUsed ? "true" : "false") << "\n");
1988 
1989         if (RtNotModified && RtNotUsed && !mayAlias(MI, MemInsns, AA)) {
1990           // For pairs loading into the same reg, try to find a renaming
1991           // opportunity to allow the renaming of Reg between FirstMI and MI
1992           // and combine MI into FirstMI; otherwise bail and keep looking.
1993           if (SameLoadReg) {
1994             std::optional<MCPhysReg> RenameReg =
1995                 findRenameRegForSameLdStRegPair(MaybeCanRename, FirstMI, MI,
1996                                                 Reg, DefinedInBB, UsedInBetween,
1997                                                 RequiredClasses, TRI);
1998             if (!RenameReg) {
1999               LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
2000                                                 UsedRegUnits, TRI);
2001               MemInsns.push_back(&MI);
2002               LLVM_DEBUG(dbgs() << "Can't find reg for renaming, "
2003                                 << "keep looking.\n");
2004               continue;
2005             }
2006             Flags.setRenameReg(*RenameReg);
2007           }
2008 
2009           Flags.setMergeForward(false);
2010           if (!SameLoadReg)
2011             Flags.clearRenameReg();
2012           return MBBI;
2013         }
2014 
2015         // Likewise, if the Rt of the first instruction is not modified or used
2016         // between the two instructions and none of the instructions between the
2017         // first and the second alias with the first, we can combine the first
2018         // into the second.
2019         RtNotModified = !(
2020             MayLoad && !UsedRegUnits.available(getLdStRegOp(FirstMI).getReg()));
2021 
2022         LLVM_DEBUG(dbgs() << "Checking, can combine 1st into 2nd insn:\n"
2023                           << "Reg '" << getLdStRegOp(FirstMI)
2024                           << "' not modified: "
2025                           << (RtNotModified ? "true" : "false") << "\n");
2026 
2027         if (RtNotModified && !mayAlias(FirstMI, MemInsns, AA)) {
2028           if (ModifiedRegUnits.available(getLdStRegOp(FirstMI).getReg())) {
2029             Flags.setMergeForward(true);
2030             Flags.clearRenameReg();
2031             return MBBI;
2032           }
2033 
2034           std::optional<MCPhysReg> RenameReg = findRenameRegForSameLdStRegPair(
2035               MaybeCanRename, FirstMI, MI, Reg, DefinedInBB, UsedInBetween,
2036               RequiredClasses, TRI);
2037           if (RenameReg) {
2038             Flags.setMergeForward(true);
2039             Flags.setRenameReg(*RenameReg);
2040             MBBIWithRenameReg = MBBI;
2041           }
2042         }
2043         LLVM_DEBUG(dbgs() << "Unable to combine these instructions due to "
2044                           << "interference in between, keep looking.\n");
2045       }
2046     }
2047 
2048     if (Flags.getRenameReg())
2049       return MBBIWithRenameReg;
2050 
2051     // The instruction wasn't a matching load or store. Stop searching if we
2052     // encounter a call instruction that might modify memory.
2053     if (MI.isCall()) {
2054       LLVM_DEBUG(dbgs() << "Found a call, stop looking.\n");
2055       return E;
2056     }
2057 
2058     // Update modified / uses register units.
2059     LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);
2060 
2061     // Otherwise, if the base register is modified, we have no match, so
2062     // return early.
2063     if (!ModifiedRegUnits.available(BaseReg)) {
2064       LLVM_DEBUG(dbgs() << "Base reg is modified, stop looking.\n");
2065       return E;
2066     }
2067 
2068     // Update list of instructions that read/write memory.
2069     if (MI.mayLoadOrStore())
2070       MemInsns.push_back(&MI);
2071   }
2072   return E;
2073 }
2074 
2075 static MachineBasicBlock::iterator
2076 maybeMoveCFI(MachineInstr &MI, MachineBasicBlock::iterator MaybeCFI) {
2077   assert((MI.getOpcode() == AArch64::SUBXri ||
2078           MI.getOpcode() == AArch64::ADDXri) &&
2079          "Expected a register update instruction");
2080   auto End = MI.getParent()->end();
2081   if (MaybeCFI == End ||
2082       MaybeCFI->getOpcode() != TargetOpcode::CFI_INSTRUCTION ||
2083       !(MI.getFlag(MachineInstr::FrameSetup) ||
2084         MI.getFlag(MachineInstr::FrameDestroy)) ||
2085       MI.getOperand(0).getReg() != AArch64::SP)
2086     return End;
2087 
2088   const MachineFunction &MF = *MI.getParent()->getParent();
2089   unsigned CFIIndex = MaybeCFI->getOperand(0).getCFIIndex();
2090   const MCCFIInstruction &CFI = MF.getFrameInstructions()[CFIIndex];
2091   switch (CFI.getOperation()) {
2092   case MCCFIInstruction::OpDefCfa:
2093   case MCCFIInstruction::OpDefCfaOffset:
2094     return MaybeCFI;
2095   default:
2096     return End;
2097   }
2098 }
2099 
2100 std::optional<MachineBasicBlock::iterator> AArch64LoadStoreOpt::mergeUpdateInsn(
2101     MachineBasicBlock::iterator I, MachineBasicBlock::iterator Update,
2102     bool IsForward, bool IsPreIdx, bool MergeEither) {
2103   assert((Update->getOpcode() == AArch64::ADDXri ||
2104           Update->getOpcode() == AArch64::SUBXri) &&
2105          "Unexpected base register update instruction to merge!");
2106   MachineBasicBlock::iterator E = I->getParent()->end();
2107   MachineBasicBlock::iterator NextI = next_nodbg(I, E);
2108 
2109   // If updating the SP and the following instruction is a CFA-offset-related
2110   // CFI, make sure the CFI follows the SP update either by merging at the
2111   // location of the update or by moving the CFI after the merged instruction.
2112   // If unable to do so, bail.
2113   MachineBasicBlock::iterator InsertPt = I;
2114   if (IsForward) {
2115     assert(IsPreIdx);
2116     if (auto CFI = maybeMoveCFI(*Update, next_nodbg(Update, E)); CFI != E) {
2117       if (MergeEither) {
2118         InsertPt = Update;
2119       } else {
2120         // Take care not to reorder CFIs.
2121         if (std::any_of(std::next(CFI), I, [](const auto &Insn) {
2122               return Insn.getOpcode() == TargetOpcode::CFI_INSTRUCTION;
2123             }))
2124           return std::nullopt;
2125 
2126         MachineBasicBlock *MBB = InsertPt->getParent();
2127         MBB->splice(std::next(InsertPt), MBB, CFI);
2128       }
2129     }
2130   }
2131 
2132   // Return the instruction following the merged instruction, which is
2133   // the instruction following our unmerged load. Unless that's the add/sub
2134   // instruction we're merging, in which case it's the one after that.
2135   if (NextI == Update)
2136     NextI = next_nodbg(NextI, E);
2137 
2138   int Value = Update->getOperand(2).getImm();
2139   assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
2140          "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
2141   if (Update->getOpcode() == AArch64::SUBXri)
2142     Value = -Value;
2143 
2144   unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
2145                              : getPostIndexedOpcode(I->getOpcode());
2146   MachineInstrBuilder MIB;
2147   int Scale, MinOffset, MaxOffset;
2148   getPrePostIndexedMemOpInfo(*I, Scale, MinOffset, MaxOffset);
2149   if (!AArch64InstrInfo::isPairedLdSt(*I)) {
2150     // Non-paired instruction.
2151     MIB = BuildMI(*InsertPt->getParent(), InsertPt, InsertPt->getDebugLoc(),
2152                   TII->get(NewOpc))
2153               .add(Update->getOperand(0))
2154               .add(getLdStRegOp(*I))
2155               .add(AArch64InstrInfo::getLdStBaseOp(*I))
2156               .addImm(Value / Scale)
2157               .setMemRefs(I->memoperands())
2158               .setMIFlags(I->mergeFlagsWith(*Update));
2159   } else {
2160     // Paired instruction.
2161     MIB = BuildMI(*InsertPt->getParent(), InsertPt, InsertPt->getDebugLoc(),
2162                   TII->get(NewOpc))
2163               .add(Update->getOperand(0))
2164               .add(getLdStRegOp(*I, 0))
2165               .add(getLdStRegOp(*I, 1))
2166               .add(AArch64InstrInfo::getLdStBaseOp(*I))
2167               .addImm(Value / Scale)
2168               .setMemRefs(I->memoperands())
2169               .setMIFlags(I->mergeFlagsWith(*Update));
2170   }
2171 
2172   if (IsPreIdx) {
2173     ++NumPreFolded;
2174     LLVM_DEBUG(dbgs() << "Creating pre-indexed load/store.");
2175   } else {
2176     ++NumPostFolded;
2177     LLVM_DEBUG(dbgs() << "Creating post-indexed load/store.");
2178   }
2179   LLVM_DEBUG(dbgs() << "    Replacing instructions:\n    ");
2180   LLVM_DEBUG(I->print(dbgs()));
2181   LLVM_DEBUG(dbgs() << "    ");
2182   LLVM_DEBUG(Update->print(dbgs()));
2183   LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
2184   LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
2185   LLVM_DEBUG(dbgs() << "\n");
2186 
2187   // Erase the old instructions for the block.
2188   I->eraseFromParent();
2189   Update->eraseFromParent();
2190 
2191   return NextI;
2192 }
2193 
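// A sketch of the transformation (registers, opcodes and values chosen for
// exposition, assuming Scale == 1):
//   movz w8, #0x0a00
//   movk w8, #0x1, lsl #16       ; index = 0x10a00
//   ldrb w0, [x1, x8]
// becomes
//   add  x8, x1, #0x10, lsl #12  ; x8 = x1 + 0x10000
//   ldrb w0, [x8, #0xa00]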
2194 MachineBasicBlock::iterator
2195 AArch64LoadStoreOpt::mergeConstOffsetInsn(MachineBasicBlock::iterator I,
2196                                           MachineBasicBlock::iterator Update,
2197                                           unsigned Offset, int Scale) {
2198   assert((Update->getOpcode() == AArch64::MOVKWi) &&
2199          "Unexpected const mov instruction to merge!");
2200   MachineBasicBlock::iterator E = I->getParent()->end();
2201   MachineBasicBlock::iterator NextI = next_nodbg(I, E);
2202   MachineBasicBlock::iterator PrevI = prev_nodbg(Update, E);
2203   MachineInstr &MemMI = *I;
2204   unsigned Mask = (1 << 12) * Scale - 1;
2205   unsigned Low = Offset & Mask;
2206   unsigned High = Offset - Low;
2207   Register BaseReg = AArch64InstrInfo::getLdStBaseOp(MemMI).getReg();
2208   Register IndexReg = AArch64InstrInfo::getLdStOffsetOp(MemMI).getReg();
2209   MachineInstrBuilder AddMIB, MemMIB;
2210 
2211   // Add IndexReg, BaseReg, High (the BaseReg may be SP)
2212   AddMIB =
2213       BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(AArch64::ADDXri))
2214           .addDef(IndexReg)
2215           .addUse(BaseReg)
2216           .addImm(High >> 12) // shifted value
2217           .addImm(12);        // shift 12
2218   (void)AddMIB;
2219   // Ld/St DestReg, IndexReg, Imm12
2220   unsigned NewOpc = getBaseAddressOpcode(I->getOpcode());
2221   MemMIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
2222                .add(getLdStRegOp(MemMI))
2223                .add(AArch64InstrInfo::getLdStOffsetOp(MemMI))
2224                .addImm(Low / Scale)
2225                .setMemRefs(I->memoperands())
2226                .setMIFlags(I->mergeFlagsWith(*Update));
2227   (void)MemMIB;
2228 
2229   ++NumConstOffsetFolded;
2230   LLVM_DEBUG(dbgs() << "Creating base address load/store.\n");
2231   LLVM_DEBUG(dbgs() << "    Replacing instructions:\n    ");
2232   LLVM_DEBUG(PrevI->print(dbgs()));
2233   LLVM_DEBUG(dbgs() << "    ");
2234   LLVM_DEBUG(Update->print(dbgs()));
2235   LLVM_DEBUG(dbgs() << "    ");
2236   LLVM_DEBUG(I->print(dbgs()));
2237   LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
2238   LLVM_DEBUG(((MachineInstr *)AddMIB)->print(dbgs()));
2239   LLVM_DEBUG(dbgs() << "    ");
2240   LLVM_DEBUG(((MachineInstr *)MemMIB)->print(dbgs()));
2241   LLVM_DEBUG(dbgs() << "\n");
2242 
2243   // Erase the old instructions for the block.
2244   I->eraseFromParent();
2245   PrevI->eraseFromParent();
2246   Update->eraseFromParent();
2247 
2248   return NextI;
2249 }
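// For illustration: given "ldr x0, [x2]" and a requested Offset of 16,
// "add x2, x2, #16" matches (as does "sub x2, x2, #16" for Offset -16); with
// Offset == 0, any in-range, properly scaled update of x2 matches.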
2250 
2251 bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
2252                                                MachineInstr &MI,
2253                                                unsigned BaseReg, int Offset) {
2254   switch (MI.getOpcode()) {
2255   default:
2256     break;
2257   case AArch64::SUBXri:
2258   case AArch64::ADDXri:
2259     // Make sure it's a vanilla immediate operand, not a relocation or
2260     // anything else we can't handle.
2261     if (!MI.getOperand(2).isImm())
2262       break;
2263     // Watch out for 1 << 12 shifted value.
2264     if (AArch64_AM::getShiftValue(MI.getOperand(3).getImm()))
2265       break;
2266 
2267     // The update instruction source and destination register must be the
2268     // same as the load/store base register.
2269     if (MI.getOperand(0).getReg() != BaseReg ||
2270         MI.getOperand(1).getReg() != BaseReg)
2271       break;
2272 
2273     int UpdateOffset = MI.getOperand(2).getImm();
2274     if (MI.getOpcode() == AArch64::SUBXri)
2275       UpdateOffset = -UpdateOffset;
2276 
2277     // The immediate must be a multiple of the scaling factor of the pre/post
2278     // indexed instruction.
2279     int Scale, MinOffset, MaxOffset;
2280     getPrePostIndexedMemOpInfo(MemMI, Scale, MinOffset, MaxOffset);
2281     if (UpdateOffset % Scale != 0)
2282       break;
2283 
2284     // Scaled offset must fit in the instruction immediate.
2285     int ScaledOffset = UpdateOffset / Scale;
2286     if (ScaledOffset > MaxOffset || ScaledOffset < MinOffset)
2287       break;
2288 
2289     // If we have a non-zero Offset, we check that it matches the amount
2290     // we're adding to the register.
2291     if (!Offset || Offset == UpdateOffset)
2292       return true;
2293     break;
2294   }
2295   return false;
2296 }
2297 
2298 bool AArch64LoadStoreOpt::isMatchingMovConstInsn(MachineInstr &MemMI,
2299                                                  MachineInstr &MI,
2300                                                  unsigned IndexReg,
2301                                                  unsigned &Offset) {
2302   // The update instruction source and destination register must be the
2303   // same as the load/store index register.
2304   if (MI.getOpcode() == AArch64::MOVKWi &&
2305       TRI->isSuperOrSubRegisterEq(IndexReg, MI.getOperand(1).getReg())) {
2306 
2307     // A movz + movk pair materializes a large offset for a Ld/St instruction.
2308     MachineBasicBlock::iterator B = MI.getParent()->begin();
2309     MachineBasicBlock::iterator MBBI = &MI;
2310     // Bail out if MI is the first instruction of the block.
2311     if (MBBI == B)
2312       return false;
2313     MBBI = prev_nodbg(MBBI, B);
2314     MachineInstr &MovzMI = *MBBI;
2315     // Make sure the MOVKWi and MOVZWi set the same register.
2316     if (MovzMI.getOpcode() == AArch64::MOVZWi &&
2317         MovzMI.getOperand(0).getReg() == MI.getOperand(0).getReg()) {
2318       unsigned Low = MovzMI.getOperand(1).getImm();
2319       unsigned High = MI.getOperand(2).getImm() << MI.getOperand(3).getImm();
2320       Offset = High + Low;
2321       // 12-bit optionally shifted immediates are legal for adds.
2322       return Offset >> 24 == 0;
2323     }
2324   }
2325   return false;
2326 }
2327 
2328 MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
2329     MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
2330   MachineBasicBlock::iterator E = I->getParent()->end();
2331   MachineInstr &MemMI = *I;
2332   MachineBasicBlock::iterator MBBI = I;
2333 
2334   Register BaseReg = AArch64InstrInfo::getLdStBaseOp(MemMI).getReg();
2335   int MIUnscaledOffset = AArch64InstrInfo::getLdStOffsetOp(MemMI).getImm() *
2336                          TII->getMemScale(MemMI);
2337 
2338   // Scan forward looking for post-index opportunities.  Updating instructions
2339   // can't be formed if the memory instruction doesn't have the offset we're
2340   // looking for.
2341   if (MIUnscaledOffset != UnscaledOffset)
2342     return E;
2343 
2344   // If the base register overlaps a source/destination register, we can't
2345   // merge the update. This does not apply to tag store instructions which
2346   // ignore the address part of the source register.
2347   // This also does not apply to STGPi which, unlike normal stores, does not
2348   // have unpredictable behavior in this case and always performs writeback
2349   // after reading the source register value.
2350   if (!isTagStore(MemMI) && MemMI.getOpcode() != AArch64::STGPi) {
2351     bool IsPairedInsn = AArch64InstrInfo::isPairedLdSt(MemMI);
2352     for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
2353       Register DestReg = getLdStRegOp(MemMI, i).getReg();
2354       if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
2355         return E;
2356     }
2357   }
2358 
2359   // Track which register units have been modified and used between the first
2360   // insn (inclusive) and the second insn.
2361   ModifiedRegUnits.clear();
2362   UsedRegUnits.clear();
2363   MBBI = next_nodbg(MBBI, E);
2364 
2365   // We can't post-increment the stack pointer if any instruction between
2366   // the memory access (I) and the increment (MBBI) can access the memory
2367   // region defined by [SP, MBBI].
2368   const bool BaseRegSP = BaseReg == AArch64::SP;
2369   if (BaseRegSP && needsWinCFI(I->getMF())) {
2370     // FIXME: For now, we always block the optimization over SP on Windows
2371     // targets, as it would require adjusting the unwind/debug info; messing up
2372     // the unwind info can actually cause a miscompile.
2373     return E;
2374   }
2375 
2376   for (unsigned Count = 0; MBBI != E && Count < Limit;
2377        MBBI = next_nodbg(MBBI, E)) {
2378     MachineInstr &MI = *MBBI;
2379 
2380     // Don't count transient instructions towards the search limit since there
2381     // may be different numbers of them if e.g. debug information is present.
2382     if (!MI.isTransient())
2383       ++Count;
2384 
2385     // If we found a match, return it.
2386     if (isMatchingUpdateInsn(*I, MI, BaseReg, UnscaledOffset))
2387       return MBBI;
2388 
2389     // Update the status of what the instruction clobbered and used.
2390     LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);
2391 
2392     // Otherwise, if the base register is used or modified, we have no match, so
2393     // return early.
2394     // If we are optimizing SP, do not allow instructions that may load or store
2395     // in between the load and the optimized value update.
2396     if (!ModifiedRegUnits.available(BaseReg) ||
2397         !UsedRegUnits.available(BaseReg) ||
2398         (BaseRegSP && MBBI->mayLoadOrStore()))
2399       return E;
2400   }
2401   return E;
2402 }
2403 
2404 MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
2405     MachineBasicBlock::iterator I, unsigned Limit, bool &MergeEither) {
2406   MachineBasicBlock::iterator B = I->getParent()->begin();
2407   MachineBasicBlock::iterator E = I->getParent()->end();
2408   MachineInstr &MemMI = *I;
2409   MachineBasicBlock::iterator MBBI = I;
2410   MachineFunction &MF = *MemMI.getMF();
2411 
2412   Register BaseReg = AArch64InstrInfo::getLdStBaseOp(MemMI).getReg();
2413   int Offset = AArch64InstrInfo::getLdStOffsetOp(MemMI).getImm();
2414 
2415   bool IsPairedInsn = AArch64InstrInfo::isPairedLdSt(MemMI);
2416   Register DestReg[] = {getLdStRegOp(MemMI, 0).getReg(),
2417                         IsPairedInsn ? getLdStRegOp(MemMI, 1).getReg()
2418                                      : AArch64::NoRegister};
2419 
2420   // If the load/store is the first instruction in the block, there's obviously
2421   // not any matching update. Ditto if the memory offset isn't zero.
2422   if (MBBI == B || Offset != 0)
2423     return E;
2424   // If the base register overlaps a destination register, we can't
2425   // merge the update.
2426   if (!isTagStore(MemMI)) {
2427     for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i)
2428       if (DestReg[i] == BaseReg || TRI->isSubRegister(BaseReg, DestReg[i]))
2429         return E;
2430   }
2431 
2432   const bool BaseRegSP = BaseReg == AArch64::SP;
2433   if (BaseRegSP && needsWinCFI(I->getMF())) {
2434     // FIXME: For now, we always block the optimization over SP on Windows
2435     // targets, as it would require adjusting the unwind/debug info; messing up
2436     // the unwind info can actually cause a miscompile.
2437     return E;
2438   }
2439 
2440   const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
2441   unsigned RedZoneSize =
2442       Subtarget.getTargetLowering()->getRedZoneSize(MF.getFunction());
2443 
2444   // Track which register units have been modified and used between the first
2445   // insn (inclusive) and the second insn.
2446   ModifiedRegUnits.clear();
2447   UsedRegUnits.clear();
2448   unsigned Count = 0;
2449   bool MemAccessBeforeSPPreInc = false;
2450   MergeEither = true;
2451   do {
2452     MBBI = prev_nodbg(MBBI, B);
2453     MachineInstr &MI = *MBBI;
2454 
2455     // Don't count transient instructions towards the search limit since there
2456     // may be different numbers of them if e.g. debug information is present.
2457     if (!MI.isTransient())
2458       ++Count;
2459 
2460     // If we found a match, return it.
2461     if (isMatchingUpdateInsn(*I, MI, BaseReg, Offset)) {
2462       // Check that the update value is within our red zone limit (which may be
2463       // zero).
2464       if (MemAccessBeforeSPPreInc && MBBI->getOperand(2).getImm() > RedZoneSize)
2465         return E;
2466       return MBBI;
2467     }
2468 
2469     // Update the status of what the instruction clobbered and used.
2470     LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);
2471 
2472     // Otherwise, if the base register is used or modified, we have no match, so
2473     // return early.
2474     if (!ModifiedRegUnits.available(BaseReg) ||
2475         !UsedRegUnits.available(BaseReg))
2476       return E;
2477 
2478     // If we have a destination register (i.e. a load instruction) and a
2479     // destination register is used or modified, then we can only merge forward,
2480     // i.e. the combined instruction is put in the place of the memory
2481     // instruction. Same applies if we see a memory access or side effects.
2482     if (MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects() ||
2483         (DestReg[0] != AArch64::NoRegister &&
2484          !(ModifiedRegUnits.available(DestReg[0]) &&
2485            UsedRegUnits.available(DestReg[0]))) ||
2486         (DestReg[1] != AArch64::NoRegister &&
2487          !(ModifiedRegUnits.available(DestReg[1]) &&
2488            UsedRegUnits.available(DestReg[1]))))
2489       MergeEither = false;
2490 
2491     // Keep track of whether we have a memory access before an SP pre-increment;
2492     // in that case we need to validate later that the update amount respects
2493     // the red zone.
2494     if (BaseRegSP && MBBI->mayLoadOrStore())
2495       MemAccessBeforeSPPreInc = true;
2496   } while (MBBI != B && Count < Limit);
2497   return E;
2498 }
2499 
2500 MachineBasicBlock::iterator
2501 AArch64LoadStoreOpt::findMatchingConstOffsetBackward(
2502     MachineBasicBlock::iterator I, unsigned Limit, unsigned &Offset) {
2503   MachineBasicBlock::iterator B = I->getParent()->begin();
2504   MachineBasicBlock::iterator E = I->getParent()->end();
2505   MachineInstr &MemMI = *I;
2506   MachineBasicBlock::iterator MBBI = I;
2507 
2508   // If the load is the first instruction in the block, there's obviously
2509   // not any matching load or store.
2510   if (MBBI == B)
2511     return E;
2512 
2513   // Make sure the IndexReg is killed and the shift amount is zero.
2514   // TODO: Relax this restriction to also handle extends; keep it simple for now.
2515   if (!AArch64InstrInfo::getLdStOffsetOp(MemMI).isKill() ||
2516       !AArch64InstrInfo::getLdStAmountOp(MemMI).isImm() ||
2517       (AArch64InstrInfo::getLdStAmountOp(MemMI).getImm() != 0))
2518     return E;
2519 
2520   Register IndexReg = AArch64InstrInfo::getLdStOffsetOp(MemMI).getReg();
2521 
2522   // Track which register units have been modified and used between the first
2523   // insn (inclusive) and the second insn.
2524   ModifiedRegUnits.clear();
2525   UsedRegUnits.clear();
2526   unsigned Count = 0;
2527   do {
2528     MBBI = prev_nodbg(MBBI, B);
2529     MachineInstr &MI = *MBBI;
2530 
2531     // Don't count transient instructions towards the search limit since there
2532     // may be different numbers of them if e.g. debug information is present.
2533     if (!MI.isTransient())
2534       ++Count;
2535 
2536     // If we found a match, return it.
2537     if (isMatchingMovConstInsn(*I, MI, IndexReg, Offset)) {
2538       return MBBI;
2539     }
2540 
2541     // Update the status of what the instruction clobbered and used.
2542     LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);
2543 
2544     // Otherwise, if the index register is used or modified, we have no match,
2545     // so return early.
2546     if (!ModifiedRegUnits.available(IndexReg) ||
2547         !UsedRegUnits.available(IndexReg))
2548       return E;
2549 
2550   } while (MBBI != B && Count < Limit);
2551   return E;
2552 }
2553 
2554 bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
2555     MachineBasicBlock::iterator &MBBI) {
2556   MachineInstr &MI = *MBBI;
2557   // If this is a volatile load, don't mess with it.
2558   if (MI.hasOrderedMemoryRef())
2559     return false;
2560 
2561   if (needsWinCFI(MI.getMF()) && MI.getFlag(MachineInstr::FrameDestroy))
2562     return false;
2563 
2564   // Make sure this is a reg+imm.
2565   // FIXME: It is possible to extend it to handle reg+reg cases.
2566   if (!AArch64InstrInfo::getLdStOffsetOp(MI).isImm())
2567     return false;
2568 
2569   // Look backward up to LdStLimit instructions.
2570   MachineBasicBlock::iterator StoreI;
2571   if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
2572     ++NumLoadsFromStoresPromoted;
2573     // Promote the load. Keeping the iterator straight is a
2574     // pain, so we let the merge routine tell us what the next instruction
2575     // is after it's done mucking about.
2576     MBBI = promoteLoadFromStore(MBBI, StoreI);
2577     return true;
2578   }
2579   return false;
2580 }
2581 
2582 // Merge adjacent zero stores into a wider store.
2583 bool AArch64LoadStoreOpt::tryToMergeZeroStInst(
2584     MachineBasicBlock::iterator &MBBI) {
2585   assert(isPromotableZeroStoreInst(*MBBI) && "Expected narrow store.");
2586   MachineInstr &MI = *MBBI;
2587   MachineBasicBlock::iterator E = MI.getParent()->end();
2588 
2589   if (!TII->isCandidateToMergeOrPair(MI))
2590     return false;
2591 
2592   // Look ahead up to LdStLimit instructions for a mergable instruction.
2593   LdStPairFlags Flags;
2594   MachineBasicBlock::iterator MergeMI =
2595       findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ true);
2596   if (MergeMI != E) {
2597     ++NumZeroStoresPromoted;
2598 
2599     // Keeping the iterator straight is a pain, so we let the merge routine tell
2600     // us what the next instruction is after it's done mucking about.
2601     MBBI = mergeNarrowZeroStores(MBBI, MergeMI, Flags);
2602     return true;
2603   }
2604   return false;
2605 }
2606 
2607 // Find loads and stores that can be merged into a single load or store pair
2608 // instruction.
2609 bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
2610   MachineInstr &MI = *MBBI;
2611   MachineBasicBlock::iterator E = MI.getParent()->end();
2612 
2613   if (!TII->isCandidateToMergeOrPair(MI))
2614     return false;
2615 
2616   // If the disable-ldp feature is enabled, do not emit ldp.
2617   if (MI.mayLoad() && Subtarget->hasDisableLdp())
2618     return false;
2619 
2620   // If the disable-stp feature is enabled, do not emit stp.
2621   if (MI.mayStore() && Subtarget->hasDisableStp())
2622     return false;
2623 
2624   // Early exit if the offset is not possible to match. (6 bits of positive
2625   // range, plus allow an extra one in case we find a later insn that matches
2626   // with Offset-1)
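  // For example, a scaled access at offset 64 cannot itself be the low half of
  // a pair (the paired immediate tops out at 63), but it is kept here because
  // it may still pair with a later access at offset 63.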
2627   bool IsUnscaled = TII->hasUnscaledLdStOffset(MI);
2628   int Offset = AArch64InstrInfo::getLdStOffsetOp(MI).getImm();
2629   int OffsetStride = IsUnscaled ? TII->getMemScale(MI) : 1;
2630   // Allow one more for offset.
2631   if (Offset > 0)
2632     Offset -= OffsetStride;
2633   if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
2634     return false;
2635 
2636   // Look ahead up to LdStLimit instructions for a pairable instruction.
2637   LdStPairFlags Flags;
2638   MachineBasicBlock::iterator Paired =
2639       findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false);
2640   if (Paired != E) {
2641     // Keeping the iterator straight is a pain, so we let the merge routine tell
2642     // us what the next instruction is after it's done mucking about.
2643     auto Prev = std::prev(MBBI);
2644 
2645     // Fetch the memoperand of the load/store that is a candidate for
2646     // combination.
2647     MachineMemOperand *MemOp =
2648         MI.memoperands_empty() ? nullptr : MI.memoperands().front();
2649 
2650     // If the ldp-aligned-only/stp-aligned-only feature is enabled for this
2651     // load/store, check that the alignment of the source pointer is at least
2652     // double the alignment of the type.
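    // For example, pairing two 8-byte accesses into a single 16-byte ldp/stp
    // under these features requires the memory operand to be at least 16-byte
    // aligned.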
2653     if ((MI.mayLoad() && Subtarget->hasLdpAlignedOnly()) ||
2654         (MI.mayStore() && Subtarget->hasStpAlignedOnly())) {
2655       // If there is no size/align information, cancel the transformation.
2656       if (!MemOp || !MemOp->getMemoryType().isValid()) {
2657         NumFailedAlignmentCheck++;
2658         return false;
2659       }
2660 
2661       // Get the needed alignments to check them if
2662       // ldp-aligned-only/stp-aligned-only features are enabled.
2663       uint64_t MemAlignment = MemOp->getAlign().value();
2664       uint64_t TypeAlignment = Align(MemOp->getSize().getValue()).value();
2665 
2666       if (MemAlignment < 2 * TypeAlignment) {
2667         NumFailedAlignmentCheck++;
2668         return false;
2669       }
2670     }
2671 
2672     ++NumPairCreated;
2673     if (TII->hasUnscaledLdStOffset(MI))
2674       ++NumUnscaledPairCreated;
2675 
2676     MBBI = mergePairedInsns(MBBI, Paired, Flags);
2677     // Collect liveness info for instructions between Prev and the new position
2678     // MBBI.
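    // (The per-instruction update in optimizeBlock only sees the instruction
    // at MBBI, so registers defined by anything the merge skipped over are
    // recorded here.)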
2679     for (auto I = std::next(Prev); I != MBBI; I++)
2680       updateDefinedRegisters(*I, DefinedInBB, TRI);
2681 
2682     return true;
2683   }
2684   return false;
2685 }
2686 
2687 bool AArch64LoadStoreOpt::tryToMergeLdStUpdate(
2688     MachineBasicBlock::iterator &MBBI) {
2689   MachineInstr &MI = *MBBI;
2690   MachineBasicBlock::iterator E = MI.getParent()->end();
2691   MachineBasicBlock::iterator Update;
2692 
2693   // Look forward to try to form a post-index instruction. For example,
2694   // ldr x0, [x20]
2695   // add x20, x20, #32
2696   //   merged into:
2697   // ldr x0, [x20], #32
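  // A zero offset is passed because only a load/store whose own immediate is
  // zero can be rewritten as a pure post-index access.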
2698   Update = findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
2699   if (Update != E) {
2700     // Merge the update into the ld/st.
2701     if (auto NextI = mergeUpdateInsn(MBBI, Update, /*IsForward=*/false,
2702                                      /*IsPreIdx=*/false,
2703                                      /*MergeEither=*/false)) {
2704       MBBI = *NextI;
2705       return true;
2706     }
2707   }
2708 
2709   // Don't know how to handle unscaled pre/post-index versions below, so bail.
2710   if (TII->hasUnscaledLdStOffset(MI.getOpcode()))
2711     return false;
2712 
2713   // Look back to try to find a pre-index instruction. For example,
2714   // add x0, x0, #8
2715   // ldr x1, [x0]
2716   //   merged into:
2717   // ldr x1, [x0, #8]!
2718   bool MergeEither;
2719   Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit, MergeEither);
2720   if (Update != E) {
2721     // Merge the update into the ld/st.
2722     if (auto NextI = mergeUpdateInsn(MBBI, Update, /*IsForward=*/true,
2723                                      /*IsPreIdx=*/true, MergeEither)) {
2724       MBBI = *NextI;
2725       return true;
2726     }
2727   }
2728 
2729   // The immediate in the load/store is scaled by the size of the memory
2730   // operation. The immediate in the add we're looking for,
2731   // however, is not, so adjust here.
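  // E.g. "ldr x1, [x0, #64]" encodes immediate 8 with an 8-byte scale, so
  // UnscaledOffset becomes 8 * 8 = 64 and can be matched against the byte
  // immediate of the add.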
2732   int UnscaledOffset =
2733       AArch64InstrInfo::getLdStOffsetOp(MI).getImm() * TII->getMemScale(MI);
2734 
2735   // Look forward to try to find a pre-index instruction. For example,
2736   // ldr x1, [x0, #64]
2737   // add x0, x0, #64
2738   //   merged into:
2739   // ldr x1, [x0, #64]!
2740   Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
2741   if (Update != E) {
2742     // Merge the update into the ld/st.
2743     if (auto NextI = mergeUpdateInsn(MBBI, Update, /*IsForward=*/false,
2744                                      /*IsPreIdx=*/true,
2745                                      /*MergeEither=*/false)) {
2746       MBBI = *NextI;
2747       return true;
2748     }
2749   }
2750 
2751   return false;
2752 }
2753 
2754 bool AArch64LoadStoreOpt::tryToMergeIndexLdSt(MachineBasicBlock::iterator &MBBI,
2755                                               int Scale) {
2756   MachineInstr &MI = *MBBI;
2757   MachineBasicBlock::iterator E = MI.getParent()->end();
2758   MachineBasicBlock::iterator Update;
2759 
2760   // Don't know how to handle unscaled pre/post-index versions below, so bail.
2761   if (TII->hasUnscaledLdStOffset(MI.getOpcode()))
2762     return false;
2763 
2764   // Look back to try to find a const offset for an index LdSt instruction.
2765   // For example,
2766   // mov x8, #LargeImm   ; = a * (1<<12) + imm12
2767   // ldr x1, [x0, x8]
2768   // merged into:
2769   // add x8, x0, a * (1<<12)
2770   // ldr x1, [x8, imm12]
2771   unsigned Offset;
2772   Update = findMatchingConstOffsetBackward(MBBI, LdStConstLimit, Offset);
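  // The scaled unsigned-offset form encodes its immediate in multiples of the
  // access size, so only a Scale-aligned constant can be folded.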
2773   if (Update != E && (Offset & (Scale - 1)) == 0) {
2774     // Merge the imm12 into the ld/st.
2775     MBBI = mergeConstOffsetInsn(MBBI, Update, Offset, Scale);
2776     return true;
2777   }
2778 
2779   return false;
2780 }
2781 
2782 bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
2783                                         bool EnableNarrowZeroStOpt) {
2784   AArch64FunctionInfo &AFI = *MBB.getParent()->getInfo<AArch64FunctionInfo>();
2785 
2786   bool Modified = false;
2787   // Five transformations to do here:
2788   // 1) Find loads that directly read from stores and promote them by
2789   //    replacing with mov instructions. If the store is wider than the load,
2790   //    the load will be replaced with a bitfield extract.
2791   //      e.g.,
2792   //        str w1, [x0, #4]
2793   //        ldrh w2, [x0, #6]
2794   //        ; becomes
2795   //        str w1, [x0, #4]
2796   //        lsr w2, w1, #16
2797   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
2798        MBBI != E;) {
2799     if (isPromotableLoadFromStore(*MBBI) && tryToPromoteLoadFromStore(MBBI))
2800       Modified = true;
2801     else
2802       ++MBBI;
2803   }
2804   // 2) Merge adjacent zero stores into a wider store.
2805   //      e.g.,
2806   //        strh wzr, [x0]
2807   //        strh wzr, [x0, #2]
2808   //        ; becomes
2809   //        str wzr, [x0]
2810   //      e.g.,
2811   //        str wzr, [x0]
2812   //        str wzr, [x0, #4]
2813   //        ; becomes
2814   //        str xzr, [x0]
2815   if (EnableNarrowZeroStOpt)
2816     for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
2817          MBBI != E;) {
2818       if (isPromotableZeroStoreInst(*MBBI) && tryToMergeZeroStInst(MBBI))
2819         Modified = true;
2820       else
2821         ++MBBI;
2822     }
2823   // 3) Find loads and stores that can be merged into a single load or store
2824   //    pair instruction.
2825   //      e.g.,
2826   //        ldr x0, [x2]
2827   //        ldr x1, [x2, #8]
2828   //        ; becomes
2829   //        ldp x0, x1, [x2]
2830 
2831   if (MBB.getParent()->getRegInfo().tracksLiveness()) {
2832     DefinedInBB.clear();
2833     DefinedInBB.addLiveIns(MBB);
2834   }
2835 
2836   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
2837        MBBI != E;) {
2838     // Track currently live registers up to this point, to help with
2839     // searching for a rename register on demand.
2840     updateDefinedRegisters(*MBBI, DefinedInBB, TRI);
2841     if (TII->isPairableLdStInst(*MBBI) && tryToPairLdStInst(MBBI))
2842       Modified = true;
2843     else
2844       ++MBBI;
2845   }
2846   // 4) Find base register updates that can be merged into the load or store
2847   //    as a base-reg writeback.
2848   //      e.g.,
2849   //        ldr x0, [x2]
2850   //        add x2, x2, #4
2851   //        ; becomes
2852   //        ldr x0, [x2], #4
2853   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
2854        MBBI != E;) {
2855     if (isMergeableLdStUpdate(*MBBI, AFI) && tryToMergeLdStUpdate(MBBI))
2856       Modified = true;
2857     else
2858       ++MBBI;
2859   }
2860 
2861   // 5) Find a register assigned a const value that can be combined
2862   //    into the load or store. e.g.,
2863   //        mov x8, #LargeImm   ; = a * (1<<12) + imm12
2864   //        ldr x1, [x0, x8]
2865   //        ; becomes
2866   //        add x8, x0, a * (1<<12)
2867   //        ldr x1, [x8, imm12]
2868   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
2869        MBBI != E;) {
2870     int Scale;
2871     if (isMergeableIndexLdSt(*MBBI, Scale) && tryToMergeIndexLdSt(MBBI, Scale))
2872       Modified = true;
2873     else
2874       ++MBBI;
2875   }
2876 
2877   return Modified;
2878 }
2879 
2880 bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
2881   if (skipFunction(Fn.getFunction()))
2882     return false;
2883 
2884   Subtarget = &Fn.getSubtarget<AArch64Subtarget>();
2885   TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
2886   TRI = Subtarget->getRegisterInfo();
2887   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2888 
2889   // Resize the modified and used register unit trackers.  We do this once
2890   // per function and then clear the register units each time we optimize a load
2891   // or store.
2892   ModifiedRegUnits.init(*TRI);
2893   UsedRegUnits.init(*TRI);
2894   DefinedInBB.init(*TRI);
2895 
2896   bool Modified = false;
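  // Merging adjacent narrow zero stores produces a wider store that may be
  // under-aligned, so skip that transformation when the target requires
  // strict alignment.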
2897   bool EnableNarrowZeroStOpt = !Subtarget->requiresStrictAlign();
2898   for (auto &MBB : Fn) {
2899     auto M = optimizeBlock(MBB, EnableNarrowZeroStOpt);
2900     Modified |= M;
2901   }
2902 
2903   return Modified;
2904 }
2905 
2906 // FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep loads and
2907 // stores near one another?  Note: The pre-RA instruction scheduler already has
2908 // hooks to try to schedule pairable loads/stores together to improve pairing
2909 // opportunities.  Thus, a pre-RA pairing pass may not be worth the effort.
2910 
2911 // FIXME: When pairing store instructions it's very possible for this pass to
2912 // hoist a store with a KILL marker above another use (without a KILL marker).
2913 // The resulting IR is invalid, but nothing uses the KILL markers after this
2914 // pass, so it's never caused a problem in practice.
2915 
2916 /// createAArch64LoadStoreOptimizationPass - returns an instance of the
2917 /// load / store optimization pass.
2918 FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
2919   return new AArch64LoadStoreOpt();
2920 }
2921