//===- PatternMatchTest.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "GISelMITest.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "gtest/gtest.h"

using namespace llvm;
using namespace MIPatternMatch;

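// The tests below share one idiom: build generic MIR with MachineIRBuilder,
// then run mi_match on the vreg defined by the instruction of interest. A
// minimal sketch of that idiom, using the builder B and MachineRegisterInfo
// *MRI provided by the test fixture:
//
//   auto MIB = B.buildConstant(LLT::scalar(64), 42); // %v = G_CONSTANT 42
//   int64_t Cst;
//   if (mi_match(MIB.getReg(0), *MRI, m_ICst(Cst)))
//     ; // Matched; Cst is now 42.
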
namespace {

TEST_F(AArch64GISelMITest, MatchIntConstant) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  int64_t Cst;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_ICst(Cst));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
}

TEST_F(AArch64GISelMITest, MatchIntConstantRegister) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  std::optional<ValueAndVReg> Src0;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0->VReg, MIBCst.getReg(0));
}

TEST_F(AArch64GISelMITest, MatchIntConstantSplat) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT v2s64 = LLT::fixed_vector(2, s64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
  int64_t Cst;
  EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
  EXPECT_EQ(Cst, 42);

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
  EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));

  auto ICst = B.buildConstant(s64, 15).getReg(0);
  auto SmallSplat = B.buildBuildVector(v2s64, {ICst, ICst}).getReg(0);
  auto LargeSplat = B.buildConcatVectors(v4s64, {SmallSplat, SmallSplat});
  EXPECT_TRUE(mi_match(LargeSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
}
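
// As exercised above, m_ICstOrSplat accepts either a plain G_CONSTANT or a
// constant splat, and looks through G_BUILD_VECTOR as well as
// G_CONCAT_VECTORS of smaller splats, but rejects build_vectors whose
// elements are non-constant registers.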

TEST_F(AArch64GISelMITest, MachineInstrPtrBind) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto MIBAdd = B.buildAdd(LLT::scalar(64), Copies[0], Copies[1]);
  // Test 'MachineInstr *' bind.
  // Default mi_match.
  MachineInstr *MIPtr = MIBAdd.getInstr();
  bool match = mi_match(MIPtr, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // Specialized mi_match for MachineInstr &.
  MachineInstr &MI = *MIBAdd.getInstr();
  match = mi_match(MI, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // MachineInstrBuilder has automatic conversion to MachineInstr *.
  match = mi_match(MIBAdd, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // Match instruction without def.
  auto MIBBrcond = B.buildBrCond(Copies[0], B.getMBB());
  MachineInstr *MatchedMI;
  match = mi_match(MIBBrcond, *MRI, m_MInstr(MatchedMI));
  EXPECT_TRUE(match);
  EXPECT_TRUE(MIBBrcond.getInstr() == MatchedMI);
  // Match instruction with two defs.
  auto MIBUAddO =
      B.buildUAddo(LLT::scalar(64), LLT::scalar(1), Copies[0], Copies[1]);
  match = mi_match(MIBUAddO, *MRI, m_MInstr(MatchedMI));
  EXPECT_TRUE(match);
  EXPECT_TRUE(MIBUAddO.getInstr() == MatchedMI);
}
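
// mi_match has overloads for Register, MachineInstr &, and MachineInstr *
// (and MachineInstrBuilder converts to the latter), so m_MInstr can bind
// instructions with no defs (G_BRCOND) or several defs (G_UADDO) alike.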

TEST_F(AArch64GISelMITest, MatchBinaryOp) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  LLT s32 = LLT::scalar(32);
  LLT s64 = LLT::scalar(64);
  LLT p0 = LLT::pointer(0, 64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  // Test case for no bind.
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  Register Src0, Src1, Src2;
  match = mi_match(MIBAdd.getReg(0), *MRI,
                   m_GAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build MUL(ADD %0, %1), %2
  auto MIBMul = B.buildMul(s64, MIBAdd, Copies[2]);

  // Try to match MUL.
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBAdd.getReg(0));
  EXPECT_EQ(Src1, Copies[2]);

  // Try to match MUL(ADD)
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_GAdd(m_Reg(Src0), m_Reg(Src1)), m_Reg(Src2)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  EXPECT_EQ(Src2, Copies[2]);

  // Test commutativity.
  auto MIBMul2 = B.buildMul(s64, Copies[0], B.buildConstant(s64, 42));
  // Try to match MUL(Cst, Reg) against MUL(Reg, Cst) to validate
  // commutativity.
  int64_t Cst;
  match = mi_match(MIBMul2.getReg(0), *MRI,
                   m_GMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // Make sure commutative matching does not apply to non-commutative ops
  // like SUB.
  auto MIBSub = B.buildSub(s64, Copies[0], B.buildConstant(s64, 42));
  match = mi_match(MIBSub.getReg(0), *MRI,
                   m_GSub(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  auto MIBFMul = B.buildInstr(TargetOpcode::G_FMUL, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  // Match and test commutativity for FMUL.
  match = mi_match(MIBFMul.getReg(0), *MRI,
                   m_GFMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // FSUB
  auto MIBFSub = B.buildInstr(TargetOpcode::G_FSUB, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  match = mi_match(MIBFSub.getReg(0), *MRI,
                   m_GFSub(m_Reg(Src0), m_Reg()));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  // Build AND %0, %1
  auto MIBAnd = B.buildAnd(s64, Copies[0], Copies[1]);
  // Try to match AND.
  match = mi_match(MIBAnd.getReg(0), *MRI,
                   m_GAnd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build OR %0, %1
  auto MIBOr = B.buildOr(s64, Copies[0], Copies[1]);
  // Try to match OR.
  match = mi_match(MIBOr.getReg(0), *MRI,
                   m_GOr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match lshr, and make sure a different shift amount type works.
  auto TruncCopy1 = B.buildTrunc(s32, Copies[1]);
  auto LShr = B.buildLShr(s64, Copies[0], TruncCopy1);
  match = mi_match(LShr.getReg(0), *MRI,
                   m_GLShr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Match shl, and make sure a different shift amount type works.
  auto Shl = B.buildShl(s64, Copies[0], TruncCopy1);
  match = mi_match(Shl.getReg(0), *MRI,
                   m_GShl(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Build a G_PTR_ADD and check that we can match it.
  auto PtrAdd = B.buildPtrAdd(p0, {B.buildUndef(p0)}, Copies[0]);
  match = mi_match(PtrAdd.getReg(0), *MRI, m_GPtrAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, PtrAdd->getOperand(1).getReg());
  EXPECT_EQ(Src1, Copies[0]);

  auto MIBCst = B.buildConstant(s64, 42);
  auto MIBAddCst = B.buildAdd(s64, MIBCst, Copies[0]);
  auto MIBUnmerge = B.buildUnmerge({s32, s32}, B.buildConstant(s64, 42));

  // m_BinOp with opcode.
  // Match a binary instruction by opcode; the operands must appear in the
  // written order, since m_BinOp does not try the commuted form.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  // Opcode doesn't match.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
  EXPECT_FALSE(match);

  // Instruction is not binary.
  match = mi_match(MIBCst, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
  match = mi_match(MIBUnmerge, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);

  // m_CommutativeBinOp with opcode.
  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  match = mi_match(
      MIBCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
  match = mi_match(
      MIBUnmerge, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
}
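
// Summary of the opcode-based matchers above: m_BinOp requires the operands
// in the written order, while m_CommutativeBinOp also tries the swapped
// order. A sketch mirroring the checks above, with R, C, X as placeholders:
//
//   // Fails on "G_ADD %x, 42" (constant is on the RHS):
//   mi_match(R, *MRI, m_BinOp(TargetOpcode::G_ADD, m_ICst(C), m_Reg(X)));
//   // Succeeds on either operand order:
//   mi_match(R, *MRI,
//            m_CommutativeBinOp(TargetOpcode::G_ADD, m_ICst(C), m_Reg(X)));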

TEST_F(AArch64GISelMITest, MatchICmp) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildICmp(CmpInst::ICMP_EQ, s1, Copies[0], Copies[1]);

  // Check that we can match any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GICmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GICmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::ICMP_EQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchFCmp) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildFCmp(CmpInst::FCMP_OEQ, s1, Copies[0], Copies[1]);

  // Check that we can match any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GFCmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GFCmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::FCMP_OEQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchCommutativeICmp) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  const LLT s1 = LLT::scalar(1);
  Register LHS = Copies[0];
  Register RHS = Copies[1];
  CmpInst::Predicate MatchedPred;
  bool Match = false;
  for (unsigned P = CmpInst::Predicate::FIRST_ICMP_PREDICATE;
       P < CmpInst::Predicate::LAST_ICMP_PREDICATE; ++P) {
    auto CurrPred = static_cast<CmpInst::Predicate>(P);
    auto Cmp = B.buildICmp(CurrPred, s1, LHS, RHS);
    // Basic matching.
    Match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GICmp(m_Pred(MatchedPred), m_SpecificReg(LHS), m_SpecificReg(RHS)));
    EXPECT_TRUE(Match);
    EXPECT_EQ(MatchedPred, CurrPred);
    // Commuting the operands should still match, but the reported predicate
    // should be swapped.
    Match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GICmp(m_Pred(MatchedPred), m_SpecificReg(RHS), m_SpecificReg(LHS)));
    EXPECT_TRUE(Match);
    EXPECT_EQ(MatchedPred, CmpInst::getSwappedPredicate(CurrPred));
  }
}

TEST_F(AArch64GISelMITest, MatchCommutativeFCmp) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  const LLT s1 = LLT::scalar(1);
  Register LHS = Copies[0];
  Register RHS = Copies[1];
  CmpInst::Predicate MatchedPred;
  bool Match = false;
  for (unsigned P = CmpInst::Predicate::FIRST_FCMP_PREDICATE;
       P < CmpInst::Predicate::LAST_FCMP_PREDICATE; ++P) {
    auto CurrPred = static_cast<CmpInst::Predicate>(P);
    auto Cmp = B.buildFCmp(CurrPred, s1, LHS, RHS);
    // Basic matching.
    Match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GFCmp(m_Pred(MatchedPred), m_SpecificReg(LHS), m_SpecificReg(RHS)));
    EXPECT_TRUE(Match);
    EXPECT_EQ(MatchedPred, CurrPred);
    // Commuting the operands should still match, but the reported predicate
    // should be swapped.
    Match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GFCmp(m_Pred(MatchedPred), m_SpecificReg(RHS), m_SpecificReg(LHS)));
    EXPECT_TRUE(Match);
    EXPECT_EQ(MatchedPred, CmpInst::getSwappedPredicate(CurrPred));
  }
}
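
// When the commutative compare matchers above match with swapped operands,
// the predicate they report is swapped accordingly (mirroring
// CmpInst::getSwappedPredicate), e.g. "icmp sgt %a, %b" seen through
// m_c_GICmp(pred, %b, %a) reports ICMP_SLT.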

TEST_F(AArch64GISelMITest, MatchFPUnaryOp) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  // Truncate s64 to s32.
  LLT s32 = LLT::scalar(32);
  auto Copy0s32 = B.buildFPTrunc(s32, Copies[0]);

  // Match G_FABS.
  auto MIBFabs = B.buildInstr(TargetOpcode::G_FABS, {s32}, {Copy0s32});
  bool match =
      mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg()));
  EXPECT_TRUE(match);

  Register Src;
  auto MIBFNeg = B.buildInstr(TargetOpcode::G_FNEG, {s32}, {Copy0s32});
  match = mi_match(MIBFNeg.getReg(0), *MRI, m_GFNeg(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  match = mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  // Build and match FConstant.
  auto MIBFCst = B.buildFConstant(s32, .5);
  const ConstantFP *TmpFP{};
  match = mi_match(MIBFCst.getReg(0), *MRI, m_GFCst(TmpFP));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP);
  APFloat APF((float).5);
  auto *CFP = ConstantFP::get(Context, APF);
  EXPECT_EQ(CFP, TmpFP);

  // Build a double-precision constant.
  LLT s64 = LLT::scalar(64);
  auto MIBFCst64 = B.buildFConstant(s64, .5);
  const ConstantFP *TmpFP64{};
  match = mi_match(MIBFCst64.getReg(0), *MRI, m_GFCst(TmpFP64));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP64);
  APFloat APF64(.5);
  auto CFP64 = ConstantFP::get(Context, APF64);
  EXPECT_EQ(CFP64, TmpFP64);
  EXPECT_NE(TmpFP64, TmpFP);

  // Build a half-precision constant.
  LLT s16 = LLT::scalar(16);
  auto MIBFCst16 = B.buildFConstant(s16, .5);
  const ConstantFP *TmpFP16{};
  match = mi_match(MIBFCst16.getReg(0), *MRI, m_GFCst(TmpFP16));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP16);
  bool Ignored;
  APFloat APF16(.5);
  APF16.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  auto CFP16 = ConstantFP::get(Context, APF16);
  EXPECT_EQ(TmpFP16, CFP16);
  EXPECT_NE(TmpFP16, TmpFP);
}
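
// The pointer-equality checks above rely on ConstantFP::get uniquing
// constants per LLVMContext: asking twice for the same value with the same
// semantics returns the identical ConstantFP object, while 0.5 as half,
// float, and double are three distinct objects.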

TEST_F(AArch64GISelMITest, MatchExtendsTrunc) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);

  auto MIBTrunc = B.buildTrunc(s32, Copies[0]);
  auto MIBAExt = B.buildAnyExt(s64, MIBTrunc);
  auto MIBZExt = B.buildZExt(s64, MIBTrunc);
  auto MIBSExt = B.buildSExt(s64, MIBTrunc);
  Register Src0;
  bool match =
      mi_match(MIBTrunc.getReg(0), *MRI, m_GTrunc(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  match =
      mi_match(MIBAExt.getReg(0), *MRI, m_GAnyExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBSExt.getReg(0), *MRI, m_GSExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBZExt.getReg(0), *MRI, m_GZExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  // Match ext(trunc src).
  match = mi_match(MIBAExt.getReg(0), *MRI,
                   m_GAnyExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBSExt.getReg(0), *MRI,
                   m_GSExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBZExt.getReg(0), *MRI,
                   m_GZExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}
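
// Matchers nest: a pattern like m_GZExt(m_GTrunc(m_Reg(Src))) walks the def
// chain through MRI, so Src binds to the operand of the innermost G_TRUNC
// rather than to the G_TRUNC result itself. A hedged sketch:
//
//   Register Src;
//   if (mi_match(Dst, *MRI, m_GZExt(m_GTrunc(m_Reg(Src)))))
//     ; // Dst is zext(trunc(Src)).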

TEST_F(AArch64GISelMITest, MatchSpecificType) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  // Try to match a 64-bit add.
  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  EXPECT_FALSE(mi_match(MIBAdd.getReg(0), *MRI,
                        m_GAdd(m_SpecificType(s32), m_Reg())));
  EXPECT_TRUE(mi_match(MIBAdd.getReg(0), *MRI,
                       m_GAdd(m_SpecificType(s64), m_Reg())));

  // Try to match the destination type of a bitcast.
  LLT v2s32 = LLT::fixed_vector(2, 32);
  auto MIBCast = B.buildCast(v2s32, Copies[0]);
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_GBitcast(m_Reg())));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_SpecificType(v2s32)));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(1), *MRI, m_SpecificType(s64)));

  // Build G_PTRTOINT and G_INTTOPTR casts, then match and test them.
  LLT PtrTy = LLT::pointer(0, 64);
  auto MIBIntToPtr = B.buildCast(PtrTy, Copies[0]);
  auto MIBPtrToInt = B.buildCast(s64, MIBIntToPtr);
  Register Src0;

  // Match ptrtoint(inttoptr Src0).
  bool match = mi_match(MIBPtrToInt.getReg(0), *MRI,
                        m_GPtrToInt(m_GIntToPtr(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchCombinators) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Src0, Src1;
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s64), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  // Check for s32 (which should fail).
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_FALSE(match);
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_any_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match a case where none of the predicates hold true.
  match = mi_match(
      MIBAdd.getReg(0), *MRI,
      m_any_of(m_SpecificType(LLT::scalar(16)), m_GSub(m_Reg(), m_Reg())));
  EXPECT_FALSE(match);
}
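
// m_all_of requires every sub-pattern to match the same operand, while
// m_any_of succeeds if at least one alternative does. As the checks above
// show, the register bindings observed afterwards are the ones made by the
// alternative that matched.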

TEST_F(AArch64GISelMITest, MatchMiscellaneous) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Reg = MIBAdd.getReg(0);

  // Only one use of Reg.
  B.buildCast(LLT::pointer(0, 32), MIBAdd);
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Add multiple debug uses of Reg.
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});

  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Multiple non-debug uses of Reg.
  B.buildCast(LLT::pointer(1, 32), MIBAdd);
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));
}
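
// m_OneUse counts DBG_VALUE uses, so the debug instructions added above
// defeat it; m_OneNonDBGUse skips them and only fails once a second real use
// exists. Combines generally want the non-debug variant so that building
// with -g does not change codegen.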

TEST_F(AArch64GISelMITest, MatchSpecificConstant) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  // Basic case: Can we match a G_CONSTANT with a specific value?
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(42)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(123)));

  // Test that this works inside of a more complex pattern.
  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_TRUE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(42)));

  // Wrong constant.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(123)));

  // No constant on the LHS.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(1), *MRI, m_SpecificICst(42)));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstantSplat) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
  MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);

  EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(42)));
  EXPECT_FALSE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(43)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstSplat(42)));

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});

  MachineInstrBuilder AddSplat =
      B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
  EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(42)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(43)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstSplat(42)));

  MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(Add.getReg(2), *MRI, m_SpecificICstSplat(42)));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstantOrSplat) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
  MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);

  EXPECT_TRUE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(42)));
  EXPECT_FALSE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(43)));
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstOrSplat(42)));

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});

  MachineInstrBuilder AddSplat =
      B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
  EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(43)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstOrSplat(42)));

  MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_TRUE(mi_match(Add.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
}
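
// Contrast with the previous test: m_SpecificICstSplat insists on a vector
// splat and rejects a plain scalar G_CONSTANT, while m_SpecificICstOrSplat
// accepts either form, which suits combines written once for both scalars
// and vectors.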

TEST_F(AArch64GISelMITest, MatchZeroInt) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  EXPECT_TRUE(mi_match(Zero.getReg(0), *MRI, m_ZeroInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_ZeroInt()));
}

TEST_F(AArch64GISelMITest, MatchAllOnesInt) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  EXPECT_TRUE(mi_match(AllOnes.getReg(0), *MRI, m_AllOnesInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_AllOnesInt()));
}

TEST_F(AArch64GISelMITest, MatchFPOrIntConst) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  Register IntOne = B.buildConstant(LLT::scalar(64), 1).getReg(0);
  Register FPOne = B.buildFConstant(LLT::scalar(64), 1.0).getReg(0);
  std::optional<ValueAndVReg> ValReg;
  std::optional<FPValueAndVReg> FValReg;

  EXPECT_TRUE(mi_match(IntOne, *MRI, m_GCst(ValReg)));
  EXPECT_EQ(IntOne, ValReg->VReg);
  EXPECT_FALSE(mi_match(IntOne, *MRI, m_GFCst(FValReg)));

  EXPECT_FALSE(mi_match(FPOne, *MRI, m_GCst(ValReg)));
  EXPECT_TRUE(mi_match(FPOne, *MRI, m_GFCst(FValReg)));
  EXPECT_EQ(FPOne, FValReg->VReg);
}

TEST_F(AArch64GISelMITest, MatchConstantSplat) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT v2s64 = LLT::fixed_vector(2, 64);
  LLT v4s64 = LLT::fixed_vector(4, 64);

  Register FPOne = B.buildFConstant(s64, 1.0).getReg(0);
  Register FPZero = B.buildFConstant(s64, 0.0).getReg(0);
  Register Undef = B.buildUndef(s64).getReg(0);
  std::optional<FPValueAndVReg> FValReg;

  // GFCstOrSplatGFCstMatch allows undef elements as part of a splat. Undefs
  // often come from padding a vector to a legal type, e.g. widening v3s64 to
  // v4s64, where the added elements are ignored afterwards.

  EXPECT_TRUE(mi_match(FPZero, *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  EXPECT_FALSE(mi_match(Undef, *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto ZeroSplat = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPZero});
  EXPECT_TRUE(
      mi_match(ZeroSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  auto ZeroUndef = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Undef});
  EXPECT_TRUE(
      mi_match(ZeroUndef.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  // A vector of all-undef elements is not a constant splat.
  auto UndefSplat = B.buildBuildVector(v4s64, {Undef, Undef, Undef, Undef});
  EXPECT_FALSE(
      mi_match(UndefSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto ZeroOne = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPOne});
  EXPECT_FALSE(
      mi_match(ZeroOne.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
  EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));

  auto Mixed = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Copies[0]});
  EXPECT_FALSE(
      mi_match(Mixed.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  // Look through G_CONCAT_VECTORS.
  auto SmallZeroSplat = B.buildBuildVector(v2s64, {FPZero, FPZero}).getReg(0);
  auto LargeZeroSplat =
      B.buildConcatVectors(v4s64, {SmallZeroSplat, SmallZeroSplat});
  EXPECT_TRUE(mi_match(LargeZeroSplat.getReg(0), *MRI,
                       GFCstOrSplatGFCstMatch(FValReg)));

  auto SmallZeroSplat2 = B.buildBuildVector(v2s64, {FPZero, FPZero}).getReg(0);
  auto SmallZeroSplat3 = B.buildCopy(v2s64, SmallZeroSplat).getReg(0);
  auto LargeZeroSplat2 =
      B.buildConcatVectors(v4s64, {SmallZeroSplat2, SmallZeroSplat3});
  EXPECT_TRUE(mi_match(LargeZeroSplat2.getReg(0), *MRI,
                       GFCstOrSplatGFCstMatch(FValReg)));

  // Not all G_CONCAT_VECTORS are splats.
  auto SmallOneSplat = B.buildBuildVector(v2s64, {FPOne, FPOne}).getReg(0);
  auto LargeMixedSplat =
      B.buildConcatVectors(v4s64, {SmallZeroSplat, SmallOneSplat});
  EXPECT_FALSE(mi_match(LargeMixedSplat.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));

  auto SmallMixedSplat = B.buildBuildVector(v2s64, {FPOne, FPZero}).getReg(0);
  auto LargeSplat =
      B.buildConcatVectors(v4s64, {SmallMixedSplat, SmallMixedSplat});
  EXPECT_FALSE(
      mi_match(LargeSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto SmallUndefSplat = B.buildBuildVector(v2s64, {Undef, Undef}).getReg(0);
  auto LargeUndefSplat =
      B.buildConcatVectors(v4s64, {SmallUndefSplat, SmallUndefSplat});
  EXPECT_FALSE(mi_match(LargeUndefSplat.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));

  auto UndefVec = B.buildUndef(v2s64).getReg(0);
  auto LargeUndefSplat2 = B.buildConcatVectors(v4s64, {UndefVec, UndefVec});
  EXPECT_FALSE(mi_match(LargeUndefSplat2.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));
}

TEST_F(AArch64GISelMITest, MatchNeg) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  auto NegInst = B.buildSub(s64, Zero, Copies[0]);
  Register NegatedReg;

  // Match: G_SUB 0, %Reg
  EXPECT_TRUE(mi_match(NegInst.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);

  // Don't match: G_SUB %Reg, 0
  auto NotNegInst1 = B.buildSub(s64, Copies[0], Zero);
  EXPECT_FALSE(mi_match(NotNegInst1.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Don't match: G_SUB 42, %Reg
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto NotNegInst2 = B.buildSub(s64, FortyTwo, Copies[0]);
  EXPECT_FALSE(mi_match(NotNegInst2.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Complex testcase.
  // %sub = G_SUB 0, %negated_reg
  // %add = G_ADD %x, %sub
  auto AddInst = B.buildAdd(s64, Copies[1], NegInst);
  NegatedReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchNot) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  auto NotInst1 = B.buildXor(s64, Copies[0], AllOnes);
  Register NotReg;

  // Match: G_XOR %NotReg, -1
  EXPECT_TRUE(mi_match(NotInst1.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);

  // Match: G_XOR -1, %NotReg
  auto NotInst2 = B.buildXor(s64, AllOnes, Copies[1]);
  EXPECT_TRUE(mi_match(NotInst2.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[1]);

  // Don't match: G_XOR %NotReg, 42
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto WrongCst = B.buildXor(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(WrongCst.getReg(0), *MRI, m_Not(m_Reg(NotReg))));

  // Complex testcase.
  // %xor = G_XOR %NotReg, -1
  // %add = G_ADD %x, %xor
  auto AddInst = B.buildAdd(s64, Copies[1], NotInst1);
  NotReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);
}
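
// m_Neg and m_Not above are shorthands for common idioms: m_Neg(x) matches
// "G_SUB 0, x" only when the LHS is the literal constant zero, and m_Not(x)
// matches "G_XOR x, -1" with the all-ones constant on either side, since
// G_XOR is commutative. A trivial sketch:
//
//   Register X;
//   if (mi_match(Dst, *MRI, m_Not(m_Reg(X))))
//     ; // Dst = ~X.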

TEST_F(AArch64GISelMITest, MatchSpecificReg) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto Cst1 = B.buildConstant(LLT::scalar(64), 42);
  auto Cst2 = B.buildConstant(LLT::scalar(64), 314);
  Register Reg = Cst1.getReg(0);
  // Basic case: Same register twice.
  EXPECT_TRUE(mi_match(Reg, *MRI, m_SpecificReg(Reg)));
  // Basic case: Two explicitly different registers.
  EXPECT_FALSE(mi_match(Reg, *MRI, m_SpecificReg(Cst2.getReg(0))));
  // Check that we can tell that an instruction uses a specific register.
  auto Add = B.buildAdd(LLT::scalar(64), Cst1, Cst2);
  EXPECT_TRUE(
      mi_match(Add.getReg(0), *MRI, m_GAdd(m_SpecificReg(Reg), m_Reg())));
}

} // namespace

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  initLLVM();
  return RUN_ALL_TESTS();
}