//=== AArch64PostLegalizerLowering.cpp --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Post-legalization lowering for instructions.
///
/// This is used to offload pattern matching from the selector.
///
/// For example, this combiner will notice that a G_SHUFFLE_VECTOR is actually
/// a G_ZIP, G_UZP, etc.
///
/// General optimization combines should be handled by either the
/// AArch64PostLegalizerCombiner or the AArch64PreLegalizerCombiner.
///
//===----------------------------------------------------------------------===//

#include "AArch64GlobalISelUtils.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "GISel/AArch64LegalizerInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "aarch64-postlegalizer-lowering"

using namespace llvm;
using namespace MIPatternMatch;
using namespace AArch64GISelUtils;

/// Represents a pseudo instruction which replaces a G_SHUFFLE_VECTOR.
///
/// Used for matching target-supported shuffles before codegen.
struct ShuffleVectorPseudo {
  unsigned Opc; ///< Opcode for the instruction. (E.g. G_ZIP1)
  Register Dst; ///< Destination register.
  SmallVector<SrcOp, 2> SrcOps; ///< Source registers.
  ShuffleVectorPseudo(unsigned Opc, Register Dst,
                      std::initializer_list<SrcOp> SrcOps)
      : Opc(Opc), Dst(Dst), SrcOps(SrcOps) {}
  ShuffleVectorPseudo() {}
};

/// Check if a vector shuffle corresponds to a REV instruction with the
/// specified blocksize.
static bool isREVMask(ArrayRef<int> M, unsigned EltSize, unsigned NumElts,
                      unsigned BlockSize) {
  assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
         "Only possible block sizes for REV are: 16, 32, 64");
  assert(EltSize != 64 && "EltSize cannot be 64 for REV mask.");

  unsigned BlockElts = M[0] + 1;

  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSize;

  if (BlockSize <= EltSize || BlockSize != BlockElts * EltSize)
    return false;

  for (unsigned i = 0; i < NumElts; ++i) {
    // Ignore undef indices.
    if (M[i] < 0)
      continue;
    if (static_cast<unsigned>(M[i]) !=
        (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
      return false;
  }

  return true;
}

/// Determines if \p M is a shuffle vector mask for a TRN of \p NumElts.
/// Whether or not G_TRN1 or G_TRN2 should be used is stored in \p WhichResult.
static bool isTRNMask(ArrayRef<int> M, unsigned NumElts,
                      unsigned &WhichResult) {
  if (NumElts % 2 != 0)
    return false;
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((M[i] >= 0 && static_cast<unsigned>(M[i]) != i + WhichResult) ||
        (M[i + 1] >= 0 &&
         static_cast<unsigned>(M[i + 1]) != i + NumElts + WhichResult))
      return false;
  }
  return true;
}

/// Check if a G_EXT instruction can handle a shuffle mask \p M when the vector
/// sources of the shuffle are different.
static Optional<std::pair<bool, uint64_t>> getExtMask(ArrayRef<int> M,
                                                      unsigned NumElts) {
  // Look for the first non-undef element.
  auto FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
  if (FirstRealElt == M.end())
    return None;

  // Use APInt to handle overflow when calculating expected element.
  unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
  APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);

  // The following shuffle indices must be the successive elements after the
  // first real element.
  if (any_of(
          make_range(std::next(FirstRealElt), M.end()),
          [&ExpectedElt](int Elt) { return Elt != ExpectedElt++ && Elt >= 0; }))
    return None;

  // The index of an EXT is the first element if it is not UNDEF.
  // Watch out for the beginning UNDEFs. The EXT index should be the expected
  // value of the first element.  E.g.
  // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>.
  // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>.
  // ExpectedElt is the last mask index plus 1.
  uint64_t Imm = ExpectedElt.getZExtValue();
  bool ReverseExt = false;
  // There are two different cases requiring the input vectors to be reversed.
  // For example, for vector <4 x i32> we have the following cases:
  // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>)
  // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>)
  // For both cases, we finally use mask <5, 6, 7, 0>, which requires
  // reversing the two input vectors.
  if (Imm < NumElts)
    ReverseExt = true;
  else
    Imm -= NumElts;
  return std::make_pair(ReverseExt, Imm);
}

/// Determines if \p M is a shuffle vector mask for a UZP of \p NumElts.
/// Whether or not G_UZP1 or G_UZP2 should be used is stored in \p WhichResult.
static bool isUZPMask(ArrayRef<int> M, unsigned NumElts,
                      unsigned &WhichResult) {
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i != NumElts; ++i) {
    // Skip undef indices.
    if (M[i] < 0)
      continue;
    if (static_cast<unsigned>(M[i]) != 2 * i + WhichResult)
      return false;
  }
  return true;
}

/// \return true if \p M is a zip mask for a shuffle vector of \p NumElts.
/// Whether or not G_ZIP1 or G_ZIP2 should be used is stored in \p WhichResult.
static bool isZipMask(ArrayRef<int> M, unsigned NumElts,
                      unsigned &WhichResult) {
  if (NumElts % 2 != 0)
    return false;

  // 0 means use ZIP1, 1 means use ZIP2.
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((M[i] >= 0 && static_cast<unsigned>(M[i]) != Idx) ||
        (M[i + 1] >= 0 && static_cast<unsigned>(M[i + 1]) != Idx + NumElts))
      return false;
    Idx += 1;
  }
  return true;
}

/// Helper function for matchINS.
///
/// \returns a value when \p M is an ins mask for \p NumInputElements.
///
/// First element of the returned pair is true when the produced
/// G_INSERT_VECTOR_ELT destination should be the LHS of the G_SHUFFLE_VECTOR.
///
/// Second element is the destination lane for the G_INSERT_VECTOR_ELT.
static Optional<std::pair<bool, int>> isINSMask(ArrayRef<int> M,
                                                int NumInputElements) {
  if (M.size() != static_cast<size_t>(NumInputElements))
    return None;
  int NumLHSMatch = 0, NumRHSMatch = 0;
  int LastLHSMismatch = -1, LastRHSMismatch = -1;
  for (int Idx = 0; Idx < NumInputElements; ++Idx) {
    if (M[Idx] == -1) {
      ++NumLHSMatch;
      ++NumRHSMatch;
      continue;
    }
    M[Idx] == Idx ? ++NumLHSMatch : LastLHSMismatch = Idx;
    M[Idx] == Idx + NumInputElements ? ++NumRHSMatch : LastRHSMismatch = Idx;
  }
  const int NumNeededToMatch = NumInputElements - 1;
  if (NumLHSMatch == NumNeededToMatch)
    return std::make_pair(true, LastLHSMismatch);
  if (NumRHSMatch == NumNeededToMatch)
    return std::make_pair(false, LastRHSMismatch);
  return None;
}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with a
/// G_REV instruction. Returns the appropriate G_REV opcode in \p MatchInfo.
static bool matchREV(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Dst);
  unsigned EltSize = Ty.getScalarSizeInBits();

  // Element size for a rev cannot be 64.
  if (EltSize == 64)
    return false;

  unsigned NumElts = Ty.getNumElements();

  // Try to produce G_REV64
  if (isREVMask(ShuffleMask, EltSize, NumElts, 64)) {
    MatchInfo = ShuffleVectorPseudo(AArch64::G_REV64, Dst, {Src});
    return true;
  }

  // TODO: Produce G_REV32 and G_REV16 once we have proper legalization support.
  // This should be identical to above, but with a constant 32 and constant
  // 16.
  return false;
}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_TRN1 or G_TRN2 instruction.
static bool matchTRN(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isTRNMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_TRN1 : AArch64::G_TRN2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_UZP1 or G_UZP2 instruction.
///
/// \param [in] MI - The shuffle vector instruction.
/// \param [out] MatchInfo - Either G_UZP1 or G_UZP2 on success.
static bool matchUZP(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isUZPMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_UZP1 : AArch64::G_UZP2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}

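/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_ZIP1 or G_ZIP2 instruction, e.g. a <4 x s32> shuffle with mask
/// (0, 4, 1, 5) interleaves the low halves of the sources and becomes G_ZIP1.
///
/// \param [in] MI - The shuffle vector instruction.
/// \param [out] MatchInfo - Either G_ZIP1 or G_ZIP2 on success.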
static bool matchZip(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isZipMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_ZIP1 : AArch64::G_ZIP2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}

/// Helper function for matchDup.
static bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI,
                                        MachineRegisterInfo &MRI,
                                        ShuffleVectorPseudo &MatchInfo) {
  if (Lane != 0)
    return false;

  // Try to match a vector splat operation into a dup instruction.
  // We're looking for this pattern:
  //
  // %scalar:gpr(s64) = COPY $x0
  // %undef:fpr(<2 x s64>) = G_IMPLICIT_DEF
  // %cst0:gpr(s32) = G_CONSTANT i32 0
  // %zerovec:fpr(<2 x s32>) = G_BUILD_VECTOR %cst0(s32), %cst0(s32)
  // %ins:fpr(<2 x s64>) = G_INSERT_VECTOR_ELT %undef, %scalar(s64), %cst0(s32)
  // %splat:fpr(<2 x s64>) = G_SHUFFLE_VECTOR %ins(<2 x s64>), %undef, %zerovec(<2 x s32>)
  //
  // ...into:
  // %splat = G_DUP %scalar

  // Begin matching the insert.
  auto *InsMI = getOpcodeDef(TargetOpcode::G_INSERT_VECTOR_ELT,
                             MI.getOperand(1).getReg(), MRI);
  if (!InsMI)
    return false;
  // Match the undef vector operand.
  if (!getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, InsMI->getOperand(1).getReg(),
                    MRI))
    return false;

  // Match the index constant 0.
  if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ZeroInt()))
    return false;

  MatchInfo = ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(),
                                  {InsMI->getOperand(2).getReg()});
  return true;
}

/// Helper function for matchDup.
static bool matchDupFromBuildVector(int Lane, MachineInstr &MI,
                                    MachineRegisterInfo &MRI,
                                    ShuffleVectorPseudo &MatchInfo) {
  assert(Lane >= 0 && "Expected non-negative lane?");
  // Test if the LHS is a BUILD_VECTOR. If it is, then we can just reference the
  // lane's definition directly.
  auto *BuildVecMI = getOpcodeDef(TargetOpcode::G_BUILD_VECTOR,
                                  MI.getOperand(1).getReg(), MRI);
  if (!BuildVecMI)
    return false;
  Register Reg = BuildVecMI->getOperand(Lane + 1).getReg();
  MatchInfo =
      ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(), {Reg});
  return true;
}

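/// \return true if a G_SHUFFLE_VECTOR instruction \p MI is a splat of a single
/// lane and can be replaced with a G_DUP of the splatted value. Tries both the
/// insert-vector-element and build-vector forms of the splat idiom.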
static bool matchDup(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  auto MaybeLane = getSplatIndex(MI);
  if (!MaybeLane)
    return false;
  int Lane = *MaybeLane;
  // If this is undef splat, generate it via "just" vdup, if possible.
  if (Lane < 0)
    Lane = 0;
  if (matchDupFromInsertVectorElt(Lane, MI, MRI, MatchInfo))
    return true;
  if (matchDupFromBuildVector(Lane, MI, MRI, MatchInfo))
    return true;
  return false;
}

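/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with a
/// G_EXT instruction. The byte offset of the extract is stored in \p MatchInfo.
///
/// For example (sketch, assuming <4 x s32> operands):
///
///   %shuf:_(<4 x s32>) = G_SHUFFLE_VECTOR %v1, %v2, shufflemask(1, 2, 3, 4)
///
/// becomes a byte-offset extract across the concatenation of %v1 and %v2:
///
///   %imm:_(s32) = G_CONSTANT i32 4
///   %shuf:_(<4 x s32>) = G_EXT %v1, %v2, %imm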
static bool matchEXT(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Dst = MI.getOperand(0).getReg();
  auto ExtInfo = getExtMask(MI.getOperand(3).getShuffleMask(),
                            MRI.getType(Dst).getNumElements());
  if (!ExtInfo)
    return false;
  bool ReverseExt;
  uint64_t Imm;
  std::tie(ReverseExt, Imm) = *ExtInfo;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  if (ReverseExt)
    std::swap(V1, V2);
  uint64_t ExtFactor = MRI.getType(V1).getScalarSizeInBits() / 8;
  Imm *= ExtFactor;
  MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V2, Imm});
  return true;
}

/// Replace a G_SHUFFLE_VECTOR instruction with a pseudo.
/// The opcode and operands to use are taken from \p MatchInfo. \p MI is the
/// G_SHUFFLE_VECTOR being replaced.
static bool applyShuffleVectorPseudo(MachineInstr &MI,
                                     ShuffleVectorPseudo &MatchInfo) {
  MachineIRBuilder MIRBuilder(MI);
  MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst}, MatchInfo.SrcOps);
  MI.eraseFromParent();
  return true;
}

/// Replace a G_SHUFFLE_VECTOR instruction with G_EXT.
/// Special-cased because the constant operand must be emitted as a G_CONSTANT
/// for the imported tablegen patterns to work.
static bool applyEXT(MachineInstr &MI, ShuffleVectorPseudo &MatchInfo) {
  MachineIRBuilder MIRBuilder(MI);
  // Tablegen patterns expect an i32 G_CONSTANT as the final op.
  auto Cst =
      MIRBuilder.buildConstant(LLT::scalar(32), MatchInfo.SrcOps[2].getImm());
  MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst},
                        {MatchInfo.SrcOps[0], MatchInfo.SrcOps[1], Cst});
  MI.eraseFromParent();
  return true;
}

/// Match a G_SHUFFLE_VECTOR with a mask which corresponds to a
/// G_INSERT_VECTOR_ELT and G_EXTRACT_VECTOR_ELT pair.
///
/// e.g.
///   %shuf = G_SHUFFLE_VECTOR %left, %right, shufflemask(0, 0)
///
/// Can be represented as
///
///   %extract = G_EXTRACT_VECTOR_ELT %left, 0
///   %ins = G_INSERT_VECTOR_ELT %left, %extract, 1
///
static bool matchINS(MachineInstr &MI, MachineRegisterInfo &MRI,
                     std::tuple<Register, int, Register, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  int NumElts = MRI.getType(Dst).getNumElements();
  auto DstIsLeftAndDstLane = isINSMask(ShuffleMask, NumElts);
  if (!DstIsLeftAndDstLane)
    return false;
  bool DstIsLeft;
  int DstLane;
  std::tie(DstIsLeft, DstLane) = *DstIsLeftAndDstLane;
  Register Left = MI.getOperand(1).getReg();
  Register Right = MI.getOperand(2).getReg();
  Register DstVec = DstIsLeft ? Left : Right;
  Register SrcVec = Left;

  int SrcLane = ShuffleMask[DstLane];
  if (SrcLane >= NumElts) {
    SrcVec = Right;
    SrcLane -= NumElts;
  }

  MatchInfo = std::make_tuple(DstVec, DstLane, SrcVec, SrcLane);
  return true;
}

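/// Lower a G_SHUFFLE_VECTOR matched by matchINS into a G_EXTRACT_VECTOR_ELT
/// from the source vector/lane followed by a G_INSERT_VECTOR_ELT into the
/// destination vector/lane recorded in \p MatchInfo.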
static bool applyINS(MachineInstr &MI, MachineRegisterInfo &MRI,
                     MachineIRBuilder &Builder,
                     std::tuple<Register, int, Register, int> &MatchInfo) {
  Builder.setInstrAndDebugLoc(MI);
  Register Dst = MI.getOperand(0).getReg();
  auto ScalarTy = MRI.getType(Dst).getElementType();
  Register DstVec, SrcVec;
  int DstLane, SrcLane;
  std::tie(DstVec, DstLane, SrcVec, SrcLane) = MatchInfo;
  auto SrcCst = Builder.buildConstant(LLT::scalar(64), SrcLane);
  auto Extract = Builder.buildExtractVectorElement(ScalarTy, SrcVec, SrcCst);
  auto DstCst = Builder.buildConstant(LLT::scalar(64), DstLane);
  Builder.buildInsertVectorElement(Dst, DstVec, Extract, DstCst);
  MI.eraseFromParent();
  return true;
}

/// isVShiftRImm - Check if \p Reg is a valid vector splat for the immediate
/// operand of a vector shift right operation. The value must be in the range:
///   1 <= Value <= ElementBits for a right shift.
static bool isVShiftRImm(Register Reg, MachineRegisterInfo &MRI, LLT Ty,
                         int64_t &Cnt) {
  assert(Ty.isVector() && "vector shift count is not a vector type");
  MachineInstr *MI = MRI.getVRegDef(Reg);
  auto Cst = getAArch64VectorSplatScalar(*MI, MRI);
  if (!Cst)
    return false;
  Cnt = *Cst;
  int64_t ElementBits = Ty.getScalarSizeInBits();
  return Cnt >= 1 && Cnt <= ElementBits;
}

/// Match a vector G_ASHR or G_LSHR with a valid immediate shift.
static bool matchVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
                              int64_t &Imm) {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR);
  LLT Ty = MRI.getType(MI.getOperand(1).getReg());
  if (!Ty.isVector())
    return false;
  return isVShiftRImm(MI.getOperand(2).getReg(), MRI, Ty, Imm);
}

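/// Replace a vector G_ASHR/G_LSHR whose shift amount is a splat constant with
/// the AArch64 immediate-shift pseudos G_VASHR/G_VLSHR.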
static bool applyVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
                              int64_t &Imm) {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ASHR || Opc == TargetOpcode::G_LSHR);
  unsigned NewOpc =
      Opc == TargetOpcode::G_ASHR ? AArch64::G_VASHR : AArch64::G_VLSHR;
  MachineIRBuilder MIB(MI);
  auto ImmDef = MIB.buildConstant(LLT::scalar(32), Imm);
  MIB.buildInstr(NewOpc, {MI.getOperand(0)}, {MI.getOperand(1), ImmDef});
  MI.eraseFromParent();
  return true;
}

/// Determine if it is possible to modify the \p RHS and predicate \p P of a
/// G_ICMP instruction such that the right-hand side is an arithmetic immediate.
///
/// \returns A pair containing the updated immediate and predicate which may
/// be used to optimize the instruction.
///
/// \note This assumes that the comparison has been legalized.
Optional<std::pair<uint64_t, CmpInst::Predicate>>
tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
                        const MachineRegisterInfo &MRI) {
  const auto &Ty = MRI.getType(RHS);
  if (Ty.isVector())
    return None;
  unsigned Size = Ty.getSizeInBits();
  assert((Size == 32 || Size == 64) && "Expected 32 or 64 bit compare only?");

  // If the RHS is not a constant, or the RHS is already a valid arithmetic
  // immediate, then there is nothing to change.
  auto ValAndVReg = getConstantVRegValWithLookThrough(RHS, MRI);
  if (!ValAndVReg)
    return None;
  uint64_t C = ValAndVReg->Value.getZExtValue();
  if (isLegalArithImmed(C))
    return None;

  // We have a non-arithmetic immediate. Check if adjusting the immediate and
  // adjusting the predicate will result in a legal arithmetic immediate.
  switch (P) {
  default:
    return None;
  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SGE:
    // Check for
    //
    // x slt c => x sle c - 1
    // x sge c => x sgt c - 1
    //
    // When c is not the smallest possible negative number.
    if ((Size == 64 && static_cast<int64_t>(C) == INT64_MIN) ||
        (Size == 32 && static_cast<int32_t>(C) == INT32_MIN))
      return None;
    P = (P == CmpInst::ICMP_SLT) ? CmpInst::ICMP_SLE : CmpInst::ICMP_SGT;
    C -= 1;
    break;
  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_UGE:
    // Check for
    //
    // x ult c => x ule c - 1
    // x uge c => x ugt c - 1
    //
    // When c is not zero.
    if (C == 0)
      return None;
    P = (P == CmpInst::ICMP_ULT) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
    C -= 1;
    break;
  case CmpInst::ICMP_SLE:
  case CmpInst::ICMP_SGT:
    // Check for
    //
    // x sle c => x slt c + 1
    // x sgt c => x sge c + 1
    //
    // When c is not the largest possible signed integer.
    if ((Size == 32 && static_cast<int32_t>(C) == INT32_MAX) ||
        (Size == 64 && static_cast<int64_t>(C) == INT64_MAX))
      return None;
    P = (P == CmpInst::ICMP_SLE) ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE;
    C += 1;
    break;
  case CmpInst::ICMP_ULE:
  case CmpInst::ICMP_UGT:
    // Check for
    //
    // x ule c => x ult c + 1
    // x ugt c => x uge c + 1
    //
    // When c is not the largest possible unsigned integer.
    if ((Size == 32 && static_cast<uint32_t>(C) == UINT32_MAX) ||
        (Size == 64 && C == UINT64_MAX))
      return None;
    P = (P == CmpInst::ICMP_ULE) ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
    C += 1;
    break;
  }

  // Check if the new constant is valid, and return the updated constant and
  // predicate if it is.
  if (Size == 32)
    C = static_cast<uint32_t>(C);
  if (!isLegalArithImmed(C))
    return None;
  return {{C, P}};
}

/// Determine whether or not it is possible to update the RHS and predicate of
/// a G_ICMP instruction such that the RHS will be selected as an arithmetic
/// immediate.
///
/// \p MI - The G_ICMP instruction
/// \p MatchInfo - The new RHS immediate and predicate on success
///
/// See tryAdjustICmpImmAndPred for valid transformations.
bool matchAdjustICmpImmAndPred(
    MachineInstr &MI, const MachineRegisterInfo &MRI,
    std::pair<uint64_t, CmpInst::Predicate> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  Register RHS = MI.getOperand(3).getReg();
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  if (auto MaybeNewImmAndPred = tryAdjustICmpImmAndPred(RHS, Pred, MRI)) {
    MatchInfo = *MaybeNewImmAndPred;
    return true;
  }
  return false;
}

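/// Rewrite the RHS and predicate of a G_ICMP using the constant and predicate
/// computed by matchAdjustICmpImmAndPred, materializing the new RHS as a
/// G_CONSTANT.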
bool applyAdjustICmpImmAndPred(
    MachineInstr &MI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo,
    MachineIRBuilder &MIB, GISelChangeObserver &Observer) {
  MIB.setInstrAndDebugLoc(MI);
  MachineOperand &RHS = MI.getOperand(3);
  MachineRegisterInfo &MRI = *MIB.getMRI();
  auto Cst = MIB.buildConstant(MRI.cloneVirtualRegister(RHS.getReg()),
                               MatchInfo.first);
  Observer.changingInstr(MI);
  RHS.setReg(Cst->getOperand(0).getReg());
  MI.getOperand(1).setPredicate(MatchInfo.second);
  Observer.changedInstr(MI);
  return true;
}

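/// Match a G_SHUFFLE_VECTOR which broadcasts one lane of its first source into
/// every lane, and can be lowered to a lane-indexed G_DUPLANE* instruction;
/// e.g. shufflemask(1, 1, 1, 1) on a <4 x s32> source becomes G_DUPLANE32 with
/// lane 1. The opcode and lane index are stored in \p MatchInfo.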
bool matchDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
                  std::pair<unsigned, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Src1Reg = MI.getOperand(1).getReg();
  const LLT SrcTy = MRI.getType(Src1Reg);
  const LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  auto LaneIdx = getSplatIndex(MI);
  if (!LaneIdx)
    return false;

  // The lane idx should be within the first source vector.
  if (*LaneIdx >= SrcTy.getNumElements())
    return false;

  if (DstTy != SrcTy)
    return false;

  LLT ScalarTy = SrcTy.getElementType();
  unsigned ScalarSize = ScalarTy.getSizeInBits();

  unsigned Opc = 0;
  switch (SrcTy.getNumElements()) {
  case 2:
    if (ScalarSize == 64)
      Opc = AArch64::G_DUPLANE64;
    else if (ScalarSize == 32)
      Opc = AArch64::G_DUPLANE32;
    break;
  case 4:
    if (ScalarSize == 32)
      Opc = AArch64::G_DUPLANE32;
    break;
  case 8:
    if (ScalarSize == 16)
      Opc = AArch64::G_DUPLANE16;
    break;
  case 16:
    if (ScalarSize == 8)
      Opc = AArch64::G_DUPLANE8;
    break;
  default:
    break;
  }
  if (!Opc)
    return false;

  MatchInfo.first = Opc;
  MatchInfo.second = *LaneIdx;
  return true;
}

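/// Lower a splat G_SHUFFLE_VECTOR to the G_DUPLANE* opcode chosen by
/// matchDupLane, widening a <2 x s32> source to <4 x s32> with a
/// G_CONCAT_VECTORS where needed.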
bool applyDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
                  MachineIRBuilder &B, std::pair<unsigned, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Src1Reg = MI.getOperand(1).getReg();
  const LLT SrcTy = MRI.getType(Src1Reg);

  B.setInstrAndDebugLoc(MI);
  auto Lane = B.buildConstant(LLT::scalar(64), MatchInfo.second);

  Register DupSrc = MI.getOperand(1).getReg();
  // For types like <2 x s32>, we can use G_DUPLANE32, with a <4 x s32> source.
  // To do this, we can use a G_CONCAT_VECTORS to do the widening.
  if (SrcTy == LLT::vector(2, LLT::scalar(32))) {
    assert(MRI.getType(MI.getOperand(0).getReg()).getNumElements() == 2 &&
           "Unexpected dest elements");
    auto Undef = B.buildUndef(SrcTy);
    DupSrc = B.buildConcatVectors(SrcTy.changeNumElements(4),
                                  {Src1Reg, Undef.getReg(0)})
                 .getReg(0);
  }
  B.buildInstr(MatchInfo.first, {MI.getOperand(0).getReg()}, {DupSrc, Lane});
  MI.eraseFromParent();
  return true;
}

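/// \return true if a G_BUILD_VECTOR \p MI is a splat which should be lowered
/// to a G_DUP. Splats of the constants 0 and -1 are left alone so that the
/// imported immAllZerosV/immAllOnesV selection patterns can still match them.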
static bool matchBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI) {
  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  auto Splat = getAArch64VectorSplat(MI, MRI);
  if (!Splat)
    return false;
  if (Splat->isReg())
    return true;
  // Later, during selection, we'll try to match imported patterns using
  // immAllOnesV and immAllZerosV. These require G_BUILD_VECTOR. Don't lower
  // G_BUILD_VECTORs which could match those patterns.
  int64_t Cst = Splat->getCst();
  return (Cst != 0 && Cst != -1);
}

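/// Replace a splat G_BUILD_VECTOR with a G_DUP of its first source operand.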
static bool applyBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI,
                                  MachineIRBuilder &B) {
  B.setInstrAndDebugLoc(MI);
  B.buildInstr(AArch64::G_DUP, {MI.getOperand(0).getReg()},
               {MI.getOperand(1).getReg()});
  MI.eraseFromParent();
  return true;
}

/// \returns how many instructions would be saved by folding a G_ICMP's shift
/// and/or extension operations.
static unsigned getCmpOperandFoldingProfit(Register CmpOp,
                                           const MachineRegisterInfo &MRI) {
  // No instructions to save if there's more than one use or no uses.
  if (!MRI.hasOneNonDBGUse(CmpOp))
    return 0;

  // FIXME: This is duplicated with the selector. (See: selectShiftedRegister)
  auto IsSupportedExtend = [&](const MachineInstr &MI) {
    if (MI.getOpcode() == TargetOpcode::G_SEXT_INREG)
      return true;
    if (MI.getOpcode() != TargetOpcode::G_AND)
      return false;
    auto ValAndVReg =
        getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
    if (!ValAndVReg)
      return false;
    uint64_t Mask = ValAndVReg->Value.getZExtValue();
    return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
  };

  MachineInstr *Def = getDefIgnoringCopies(CmpOp, MRI);
  if (IsSupportedExtend(*Def))
    return 1;

  unsigned Opc = Def->getOpcode();
  if (Opc != TargetOpcode::G_SHL && Opc != TargetOpcode::G_ASHR &&
      Opc != TargetOpcode::G_LSHR)
    return 0;

  auto MaybeShiftAmt =
      getConstantVRegValWithLookThrough(Def->getOperand(2).getReg(), MRI);
  if (!MaybeShiftAmt)
    return 0;
  uint64_t ShiftAmt = MaybeShiftAmt->Value.getZExtValue();
  MachineInstr *ShiftLHS =
      getDefIgnoringCopies(Def->getOperand(1).getReg(), MRI);

  // Check if we can fold an extend and a shift.
  // FIXME: This is duplicated with the selector. (See:
  // selectArithExtendedRegister)
  if (IsSupportedExtend(*ShiftLHS))
    return (ShiftAmt <= 4) ? 2 : 1;

  LLT Ty = MRI.getType(Def->getOperand(0).getReg());
  if (Ty.isVector())
    return 0;
  unsigned ShiftSize = Ty.getSizeInBits();
  if ((ShiftSize == 32 && ShiftAmt <= 31) ||
      (ShiftSize == 64 && ShiftAmt <= 63))
    return 1;
  return 0;
}

/// \returns true if it would be profitable to swap the LHS and RHS of a G_ICMP
/// instruction \p MI.
static bool trySwapICmpOperands(MachineInstr &MI,
                                const MachineRegisterInfo &MRI) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // Swap the operands if it would introduce a profitable folding opportunity.
  // (e.g. a shift + extend).
  //
  //  For example:
  //    lsl     w13, w11, #1
  //    cmp     w13, w12
  // can be turned into:
  //    cmp     w12, w11, lsl #1

  // Don't swap if there's a constant on the RHS, because we know we can fold
  // that.
  Register RHS = MI.getOperand(3).getReg();
  auto RHSCst = getConstantVRegValWithLookThrough(RHS, MRI);
  if (RHSCst && isLegalArithImmed(RHSCst->Value.getSExtValue()))
    return false;

  Register LHS = MI.getOperand(2).getReg();
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  auto GetRegForProfit = [&](Register Reg) {
    MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
    return isCMN(Def, Pred, MRI) ? Def->getOperand(2).getReg() : Reg;
  };

  // Don't have a constant on the RHS. If we swap the LHS and RHS of the
  // compare, would we be able to fold more instructions?
  Register TheLHS = GetRegForProfit(LHS);
  Register TheRHS = GetRegForProfit(RHS);

  // If the LHS is more likely to give us a folding opportunity, then swap the
  // LHS and RHS.
  return (getCmpOperandFoldingProfit(TheLHS, MRI) >
          getCmpOperandFoldingProfit(TheRHS, MRI));
}

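/// Swap the LHS and RHS of a G_ICMP \p MI and invert its predicate so that the
/// compare keeps the same meaning after the swap.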
static bool applySwapICmpOperands(MachineInstr &MI,
                                  GISelChangeObserver &Observer) {
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  Register LHS = MI.getOperand(2).getReg();
  Register RHS = MI.getOperand(3).getReg();
  Observer.changingInstr(MI);
  MI.getOperand(1).setPredicate(CmpInst::getSwappedPredicate(Pred));
  MI.getOperand(2).setReg(RHS);
  MI.getOperand(3).setReg(LHS);
  Observer.changedInstr(MI);
  return true;
}

/// \returns a function which builds a vector floating point compare instruction
/// for a condition code \p CC.
/// \param [in] IsZero - True if the comparison is against 0.
/// \param [in] NoNans - True if the target has NoNansFPMath.
static std::function<Register(MachineIRBuilder &)>
getVectorFCMP(AArch64CC::CondCode CC, Register LHS, Register RHS, bool IsZero,
              bool NoNans, MachineRegisterInfo &MRI) {
  LLT DstTy = MRI.getType(LHS);
  assert(DstTy.isVector() && "Expected vector types only?");
  assert(DstTy == MRI.getType(RHS) && "Src and Dst types must match!");
  switch (CC) {
  default:
    llvm_unreachable("Unexpected condition code!");
  case AArch64CC::NE:
    return [LHS, RHS, IsZero, DstTy](MachineIRBuilder &MIB) {
      auto FCmp = IsZero
                      ? MIB.buildInstr(AArch64::G_FCMEQZ, {DstTy}, {LHS})
                      : MIB.buildInstr(AArch64::G_FCMEQ, {DstTy}, {LHS, RHS});
      return MIB.buildNot(DstTy, FCmp).getReg(0);
    };
  case AArch64CC::EQ:
    return [LHS, RHS, IsZero, DstTy](MachineIRBuilder &MIB) {
      return IsZero
                 ? MIB.buildInstr(AArch64::G_FCMEQZ, {DstTy}, {LHS}).getReg(0)
                 : MIB.buildInstr(AArch64::G_FCMEQ, {DstTy}, {LHS, RHS})
                       .getReg(0);
    };
  case AArch64CC::GE:
    return [LHS, RHS, IsZero, DstTy](MachineIRBuilder &MIB) {
      return IsZero
                 ? MIB.buildInstr(AArch64::G_FCMGEZ, {DstTy}, {LHS}).getReg(0)
                 : MIB.buildInstr(AArch64::G_FCMGE, {DstTy}, {LHS, RHS})
                       .getReg(0);
    };
  case AArch64CC::GT:
    return [LHS, RHS, IsZero, DstTy](MachineIRBuilder &MIB) {
      return IsZero
                 ? MIB.buildInstr(AArch64::G_FCMGTZ, {DstTy}, {LHS}).getReg(0)
                 : MIB.buildInstr(AArch64::G_FCMGT, {DstTy}, {LHS, RHS})
                       .getReg(0);
    };
  case AArch64CC::LS:
    return [LHS, RHS, IsZero, DstTy](MachineIRBuilder &MIB) {
      return IsZero
                 ? MIB.buildInstr(AArch64::G_FCMLEZ, {DstTy}, {LHS}).getReg(0)
                 : MIB.buildInstr(AArch64::G_FCMGE, {DstTy}, {RHS, LHS})
                       .getReg(0);
    };
  case AArch64CC::MI:
    return [LHS, RHS, IsZero, DstTy](MachineIRBuilder &MIB) {
      return IsZero
                 ? MIB.buildInstr(AArch64::G_FCMLTZ, {DstTy}, {LHS}).getReg(0)
                 : MIB.buildInstr(AArch64::G_FCMGT, {DstTy}, {RHS, LHS})
                       .getReg(0);
    };
  }
}

/// Try to lower a vector G_FCMP \p MI into an AArch64-specific pseudo.
static bool lowerVectorFCMP(MachineInstr &MI, MachineRegisterInfo &MRI,
                            MachineIRBuilder &MIB) {
  assert(MI.getOpcode() == TargetOpcode::G_FCMP);
  const auto &ST = MI.getMF()->getSubtarget<AArch64Subtarget>();
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  if (!DstTy.isVector() || !ST.hasNEON())
    return false;
  const auto Pred =
      static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  Register LHS = MI.getOperand(2).getReg();
  // TODO: Handle v4s16 case.
  unsigned EltSize = MRI.getType(LHS).getScalarSizeInBits();
  if (EltSize != 32 && EltSize != 64)
    return false;
  Register RHS = MI.getOperand(3).getReg();
  auto Splat = getAArch64VectorSplat(*MRI.getVRegDef(RHS), MRI);

  // Compares against 0 have special target-specific pseudos.
  bool IsZero = Splat && Splat->isCst() && Splat->getCst() == 0;
  bool Invert;
  AArch64CC::CondCode CC, CC2;
  changeVectorFCMPPredToAArch64CC(Pred, CC, CC2, Invert);
  bool NoNans = ST.getTargetLowering()->getTargetMachine().Options.NoNaNsFPMath;

  // Instead of having an apply function, just build here to simplify things.
  MIB.setInstrAndDebugLoc(MI);
  auto Cmp = getVectorFCMP(CC, LHS, RHS, IsZero, NoNans, MRI);
  Register CmpRes;
  if (CC2 == AArch64CC::AL)
    CmpRes = Cmp(MIB);
  else {
    auto Cmp2 = getVectorFCMP(CC2, LHS, RHS, IsZero, NoNans, MRI);
    auto Cmp2Dst = Cmp2(MIB);
    auto Cmp1Dst = Cmp(MIB);
    CmpRes = MIB.buildOr(DstTy, Cmp1Dst, Cmp2Dst).getReg(0);
  }
  if (Invert)
    CmpRes = MIB.buildNot(DstTy, CmpRes).getReg(0);
  MRI.replaceRegWith(Dst, CmpRes);
  MI.eraseFromParent();
  return true;
}

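/// Match a scalar G_STORE whose value operand is a G_TRUNC, so the store can
/// be turned into a truncating store of the truncate's source register.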
static bool matchFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
                                Register &SrcReg) {
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  Register DstReg = MI.getOperand(0).getReg();
  if (MRI.getType(DstReg).isVector())
    return false;
  // Match a store of a truncate.
  return mi_match(DstReg, MRI, m_GTrunc(m_Reg(SrcReg)));
}

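/// Form the truncating store by replacing the stored value with the source of
/// the G_TRUNC found by matchFormTruncstore.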
static bool applyFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
                                MachineIRBuilder &B,
                                GISelChangeObserver &Observer,
                                Register &SrcReg) {
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  Observer.changingInstr(MI);
  MI.getOperand(0).setReg(SrcReg);
  Observer.changedInstr(MI);
  return true;
}

#define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_DEPS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_DEPS

namespace {
#define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_H
#include "AArch64GenPostLegalizeGILowering.inc"
#undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_H

class AArch64PostLegalizerLoweringInfo : public CombinerInfo {
public:
  AArch64GenPostLegalizerLoweringHelperRuleConfig GeneratedRuleCfg;

  AArch64PostLegalizerLoweringInfo(bool OptSize, bool MinSize)
      : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                     /*LegalizerInfo*/ nullptr, /*OptEnabled = */ true, OptSize,
                     MinSize) {
    if (!GeneratedRuleCfg.parseCommandLineOption())
      report_fatal_error("Invalid rule identifier");
  }

  virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
                       MachineIRBuilder &B) const override;
};

bool AArch64PostLegalizerLoweringInfo::combine(GISelChangeObserver &Observer,
                                               MachineInstr &MI,
                                               MachineIRBuilder &B) const {
  CombinerHelper Helper(Observer, B);
  AArch64GenPostLegalizerLoweringHelper Generated(GeneratedRuleCfg);
  return Generated.tryCombineAll(Observer, MI, B, Helper);
}

#define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_CPP
#include "AArch64GenPostLegalizeGILowering.inc"
#undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_CPP

class AArch64PostLegalizerLowering : public MachineFunctionPass {
public:
  static char ID;

  AArch64PostLegalizerLowering();

  StringRef getPassName() const override {
    return "AArch64PostLegalizerLowering";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};
} // end anonymous namespace

void AArch64PostLegalizerLowering::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.setPreservesCFG();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

AArch64PostLegalizerLowering::AArch64PostLegalizerLowering()
    : MachineFunctionPass(ID) {
  initializeAArch64PostLegalizerLoweringPass(*PassRegistry::getPassRegistry());
}

bool AArch64PostLegalizerLowering::runOnMachineFunction(MachineFunction &MF) {
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;
  assert(MF.getProperties().hasProperty(
             MachineFunctionProperties::Property::Legalized) &&
         "Expected a legalized function?");
  auto *TPC = &getAnalysis<TargetPassConfig>();
  const Function &F = MF.getFunction();
  AArch64PostLegalizerLoweringInfo PCInfo(F.hasOptSize(), F.hasMinSize());
  Combiner C(PCInfo, TPC);
  return C.combineMachineInstrs(MF, /*CSEInfo*/ nullptr);
}

char AArch64PostLegalizerLowering::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PostLegalizerLowering, DEBUG_TYPE,
                      "Lower AArch64 MachineInstrs after legalization", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AArch64PostLegalizerLowering, DEBUG_TYPE,
                    "Lower AArch64 MachineInstrs after legalization", false,
                    false)

namespace llvm {
FunctionPass *createAArch64PostLegalizerLowering() {
  return new AArch64PostLegalizerLowering();
}
} // end namespace llvm
1067