//===-- SILoadStoreOptimizer.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
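// (Note that read2 offsets are counted in units of the element size: the
// byte offsets 16 and 32 above divide by the 4-byte element size to give
// offset0:4 and offset1:8.)
//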
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputation seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads have offsets that are too large to fit in the 8-bit
//   offsets, but are close enough to each other to fit after rebasing, we can
//   add to the base pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {

  struct CombineInfo {
    MachineBasicBlock::iterator I;      // First instruction of the pair.
    MachineBasicBlock::iterator Paired; // Instruction I will be merged with.
    unsigned EltSize;                   // Element size in bytes (4 or 8).
    unsigned Offset0;                   // Offset of I, re-encoded by
    unsigned Offset1;                   // offsetsCanBeCombined(); likewise for Paired.
    unsigned BaseOff;                   // Byte offset folded into the base register.
    bool UseST64;                       // Whether to use the stride-64 encoding.
    SmallVector<MachineInstr*, 8> InstsToMove; // Insts to sink below the merge.
  };

private:
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingDSInst(CombineInfo &CI);

  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {}

  SILoadStoreOptimizer(const TargetMachine &TM_) : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load / Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass(TargetMachine &TM) {
  return new SILoadStoreOptimizer(TM);
}

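// Move all instructions in InstsToMove to immediately after I, preserving
// their relative order.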
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

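// Record every register defined by MI so that later instructions reading one
// of those defs can be recognized as dependent.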
static void addDefsToList(const MachineInstr &MI,
                          SmallVectorImpl<const MachineOperand *> &Defs) {
  for (const MachineOperand &Def : MI.defs()) {
    Defs.push_back(&Def);
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  return (TII->areMemAccessesTriviallyDisjoint(*A, *B, AA) ||
    // RAW or WAR - cannot reorder
    // WAW - cannot reorder
    // RAR - safe to reorder
    !(A->mayStore() || B->mayStore()));
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      SmallVectorImpl<const MachineOperand *> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (const MachineOperand *Def : Defs) {
    bool ReadDef = MI.readsVirtualRegister(Def->getReg());
    // If ReadDef is true, then there is a use of Def between I
    // and the instruction that I will potentially be merged with. We
    // will need to move this instruction after the merged instructions.
    if (ReadDef) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

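// Check that it is safe to sink every instruction in InstsToMove past MemOp:
// only memory operations can conflict with it, and only if they may alias.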
static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
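  // For example, element offsets 64 and 4096 do not both fit in 8 bits, but
  // both are multiples of 64, and 64/64 = 1 and 4096/64 = 64 do fit.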
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the offsets in elements fit directly in the 8-bit encoding.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift the base address to decrease the offsets.
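  // For example, with EltSize == 4, byte offsets 1024 and 1040 give element
  // offsets 256 and 260; neither fits in 8 bits, but after folding the common
  // base offset 1024 into the base register they become 0 and 4.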
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

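// Scan forward from CI.I for a DS instruction with the same opcode and base
// address that CI.I can legally be merged with. Intervening instructions that
// must be sunk below the merged instruction are collected in CI.InstsToMove.
// On success, fills in CI.Paired and the offsets and returns true.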
bool SILoadStoreOptimizer::findMatchingDSInst(CombineInfo &CI) {
  MachineBasicBlock::iterator E = CI.I->getParent()->end();
  MachineBasicBlock::iterator MBBI = CI.I;
  ++MBBI;

  SmallVector<const MachineOperand *, 8> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {

      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects())
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;

      if (MBBI->mayLoadOrStore() &&
          !memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA)) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2.  Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will need
      // to be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatile or otherwise ordered accesses.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    int AddrIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                             AMDGPU::OpName::addr);
    const MachineOperand &AddrReg0 = CI.I->getOperand(AddrIdx);
    const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

    // Check same base pointer. Be careful of subregisters, which can occur with
    // vectors of pointers.
    if (AddrReg0.getReg() == AddrReg1.getReg() &&
        AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Paired = MBBI;

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI, and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

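// Replace the paired reads with a single ds_read2* that loads into a fresh
// super-register, then copy each half back to the original destinations.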
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2_B32
                                   : AMDGPU::DS_READ2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2ST64_B32
                            : AMDGPU::DS_READ2ST64_B64;

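  // The merged load defines a super-register; these indices select the halves
  // that will be copied back to the two original destination registers.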
  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
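  // If offsetsCanBeCombined() folded a common base offset out of the encoded
  // offsets, materialize base + BaseOff in a fresh register first.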
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
         .addImm(CI.BaseOff)
         .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 = BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
                                .addReg(BaseReg, BaseRegFlags) // addr
                                .addImm(NewOffset0)            // offset0
                                .addImm(NewOffset1)            // offset1
                                .addImm(0)                     // gds
                                .addMemOperand(*CI.I->memoperands_begin())
                                .addMemOperand(*CI.Paired->memoperands_begin());
  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

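// Replace the paired writes with a single ds_write2* that carries both data
// operands; unlike the read case, no copies are needed afterwards since
// stores define no registers.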
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg(), with these. We want to be sure
  // we preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2_B32
                                   : AMDGPU::DS_WRITE2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                            : AMDGPU::DS_WRITE2ST64_B64;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
        .addImm(CI.BaseOff)
        .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 = BuildMI(*MBB, CI.Paired, DL, Write2Desc)
                                .addReg(BaseReg, BaseRegFlags) // addr
                                .add(*Data0)                   // data0
                                .add(*Data1)                   // data1
                                .addImm(NewOffset0)            // offset0
                                .addImm(NewOffset1)            // offset1
                                .addImm(0)                     // gds
                                .addMemOperand(*CI.I->memoperands_begin())
                                .addMemOperand(*CI.Paired->memoperands_begin());

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine volatile or otherwise ordered accesses.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  if (!STM.loadStoreOptEnabled())
    return false;

  TII = STM.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}
553