//===- PatternMatchTest.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "GISelMITest.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "gtest/gtest.h"

using namespace llvm;
using namespace MIPatternMatch;

namespace {

TEST_F(AArch64GISelMITest, MatchIntConstant) {
  setUp();
  if (!TM)
    return;
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  int64_t Cst;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_ICst(Cst));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
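  // A minimal negative check: a plain COPY is not a G_CONSTANT, so m_ICst is
  // expected not to match it.
  EXPECT_FALSE(mi_match(Copies[0], *MRI, m_ICst(Cst)));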
}

TEST_F(AArch64GISelMITest, MatchIntConstantRegister) {
  setUp();
  if (!TM)
    return;
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  Optional<ValueAndVReg> Src0;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0->VReg, MIBCst.getReg(0));
}

TEST_F(AArch64GISelMITest, MatchIntConstantSplat) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
  int64_t Cst;
  EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
  EXPECT_EQ(Cst, 42);
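  // m_ICstOrSplat should, per its name, also accept a plain scalar
  // G_CONSTANT, not just a splat vector; a short check of that assumption:
  EXPECT_TRUE(
      mi_match(B.buildConstant(s64, 42).getReg(0), *MRI, m_ICstOrSplat(Cst)));
  EXPECT_EQ(Cst, 42);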

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
  EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
}

TEST_F(AArch64GISelMITest, MachineInstrPtrBind) {
  setUp();
  if (!TM)
    return;
  auto MIBAdd = B.buildAdd(LLT::scalar(64), Copies[0], Copies[1]);
  // Test 'MachineInstr *' bind.
  // Default mi_match.
  MachineInstr *MIPtr = MIBAdd.getInstr();
  bool match = mi_match(MIPtr, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // Specialized mi_match for MachineInstr &.
  MachineInstr &MI = *MIBAdd.getInstr();
  match = mi_match(MI, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // MachineInstrBuilder has automatic conversion to MachineInstr *.
  match = mi_match(MIBAdd, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // Match instruction without def.
  auto MIBBrcond = B.buildBrCond(Copies[0], B.getMBB());
  MachineInstr *MatchedMI;
  match = mi_match(MIBBrcond, *MRI, m_MInstr(MatchedMI));
  EXPECT_TRUE(match);
  EXPECT_TRUE(MIBBrcond.getInstr() == MatchedMI);
  // Match instruction with two defs.
  auto MIBUAddO =
      B.buildUAddo(LLT::scalar(64), LLT::scalar(1), Copies[0], Copies[1]);
  match = mi_match(MIBUAddO, *MRI, m_MInstr(MatchedMI));
  EXPECT_TRUE(match);
  EXPECT_TRUE(MIBUAddO.getInstr() == MatchedMI);
}

TEST_F(AArch64GISelMITest, MatchBinaryOp) {
  setUp();
  if (!TM)
    return;
  LLT s32 = LLT::scalar(32);
  LLT s64 = LLT::scalar(64);
  LLT p0 = LLT::pointer(0, 64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  // Test case for no bind.
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  Register Src0, Src1, Src2;
  match = mi_match(MIBAdd.getReg(0), *MRI,
                   m_GAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build MUL(ADD %0, %1), %2
  auto MIBMul = B.buildMul(s64, MIBAdd, Copies[2]);

  // Try to match MUL.
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBAdd.getReg(0));
  EXPECT_EQ(Src1, Copies[2]);

  // Try to match MUL(ADD)
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_GAdd(m_Reg(Src0), m_Reg(Src1)), m_Reg(Src2)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  EXPECT_EQ(Src2, Copies[2]);

  // Test Commutativity.
  auto MIBMul2 = B.buildMul(s64, Copies[0], B.buildConstant(s64, 42));
  // Try to match MUL(Cst, Reg) against a MUL(Reg, Cst) to validate
  // commutativity.
  int64_t Cst;
  match = mi_match(MIBMul2.getReg(0), *MRI,
                   m_GMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);
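  // The same pattern written in source order should match as well; a quick
  // sanity check that commutative matching does not require the swap.
  match = mi_match(MIBMul2.getReg(0), *MRI,
                   m_GMul(m_Reg(Src0), m_ICst(Cst)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);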

  // Make sure commutative matching doesn't apply to a non-commutative op
  // like SUB.
  auto MIBSub = B.buildSub(s64, Copies[0], B.buildConstant(s64, 42));
  match = mi_match(MIBSub.getReg(0), *MRI,
                   m_GSub(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);
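  // Written in source order, though, the SUB is expected to match.
  match = mi_match(MIBSub.getReg(0), *MRI,
                   m_GSub(m_Reg(Src0), m_ICst(Cst)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);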

  auto MIBFMul = B.buildInstr(TargetOpcode::G_FMUL, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  // Match and test commutativity for FMUL.
  match = mi_match(MIBFMul.getReg(0), *MRI,
                   m_GFMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // FSUB
  auto MIBFSub = B.buildInstr(TargetOpcode::G_FSUB, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  match = mi_match(MIBFSub.getReg(0), *MRI,
                   m_GFSub(m_Reg(Src0), m_Reg()));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  // Build AND %0, %1
  auto MIBAnd = B.buildAnd(s64, Copies[0], Copies[1]);
  // Try to match AND.
  match = mi_match(MIBAnd.getReg(0), *MRI,
                   m_GAnd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build OR %0, %1
  auto MIBOr = B.buildOr(s64, Copies[0], Copies[1]);
  // Try to match OR.
  match = mi_match(MIBOr.getReg(0), *MRI,
                   m_GOr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match lshr, and make sure a different shift amount type works.
  auto TruncCopy1 = B.buildTrunc(s32, Copies[1]);
  auto LShr = B.buildLShr(s64, Copies[0], TruncCopy1);
  match = mi_match(LShr.getReg(0), *MRI,
                   m_GLShr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Match shl, and make sure a different shift amount type works.
  auto Shl = B.buildShl(s64, Copies[0], TruncCopy1);
  match = mi_match(Shl.getReg(0), *MRI,
                   m_GShl(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Build a G_PTR_ADD and check that we can match it.
  auto PtrAdd = B.buildPtrAdd(p0, {B.buildUndef(p0)}, Copies[0]);
  match = mi_match(PtrAdd.getReg(0), *MRI, m_GPtrAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, PtrAdd->getOperand(1).getReg());
  EXPECT_EQ(Src1, Copies[0]);

  auto MIBCst = B.buildConstant(s64, 42);
  auto MIBAddCst = B.buildAdd(s64, MIBCst, Copies[0]);
  auto MIBUnmerge = B.buildUnmerge({s32, s32}, B.buildConstant(s64, 42));

  // m_BinOp with opcode.
  // Match binary instruction, opcode and its non-commutative operands.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  // Opcode doesn't match.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
  EXPECT_FALSE(match);

  // Instruction is not binary.
  match = mi_match(MIBCst, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
  match = mi_match(MIBUnmerge, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);

  // m_CommutativeBinOp with opcode.
  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  match = mi_match(
      MIBCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
  match = mi_match(
      MIBUnmerge, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
}

TEST_F(AArch64GISelMITest, MatchICmp) {
  setUp();
  if (!TM)
    return;

  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildICmp(CmpInst::ICMP_EQ, s1, Copies[0], Copies[1]);

  // Check match any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GICmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GICmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::ICMP_EQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchFCmp) {
  setUp();
  if (!TM)
    return;

  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildFCmp(CmpInst::FCMP_OEQ, s1, Copies[0], Copies[1]);

  // Check match any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GFCmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GFCmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::FCMP_OEQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchCommutativeICmp) {
  setUp();
  if (!TM)
    return;
  const LLT s1 = LLT::scalar(1);
  Register LHS = Copies[0];
  Register RHS = Copies[1];
  CmpInst::Predicate MatchedPred;
  bool Match = false;
  for (unsigned P = CmpInst::Predicate::FIRST_ICMP_PREDICATE;
       P < CmpInst::Predicate::LAST_ICMP_PREDICATE; ++P) {
    auto CurrPred = static_cast<CmpInst::Predicate>(P);
    auto Cmp = B.buildICmp(CurrPred, s1, LHS, RHS);
    // Basic matching.
    Match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GICmp(m_Pred(MatchedPred), m_SpecificReg(LHS), m_SpecificReg(RHS)));
    EXPECT_TRUE(Match);
    EXPECT_EQ(MatchedPred, CurrPred);
    // Commuting operands should still match, but the predicate should be
    // swapped.
    Match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GICmp(m_Pred(MatchedPred), m_SpecificReg(RHS), m_SpecificReg(LHS)));
    EXPECT_TRUE(Match);
    EXPECT_EQ(MatchedPred, CmpInst::getSwappedPredicate(CurrPred));
  }
}

TEST_F(AArch64GISelMITest, MatchCommutativeFCmp) {
  setUp();
  if (!TM)
    return;
  const LLT s1 = LLT::scalar(1);
  Register LHS = Copies[0];
  Register RHS = Copies[1];
  CmpInst::Predicate MatchedPred;
  bool Match = false;
  for (unsigned P = CmpInst::Predicate::FIRST_FCMP_PREDICATE;
       P < CmpInst::Predicate::LAST_FCMP_PREDICATE; ++P) {
    auto CurrPred = static_cast<CmpInst::Predicate>(P);
    auto Cmp = B.buildFCmp(CurrPred, s1, LHS, RHS);
    // Basic matching.
    Match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GFCmp(m_Pred(MatchedPred), m_SpecificReg(LHS), m_SpecificReg(RHS)));
    EXPECT_TRUE(Match);
    EXPECT_EQ(MatchedPred, CurrPred);
    // Commuting operands should still match, but the predicate should be
    // swapped.
    Match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GFCmp(m_Pred(MatchedPred), m_SpecificReg(RHS), m_SpecificReg(LHS)));
    EXPECT_TRUE(Match);
    EXPECT_EQ(MatchedPred, CmpInst::getSwappedPredicate(CurrPred));
  }
}

TEST_F(AArch64GISelMITest, MatchFPUnaryOp) {
  setUp();
  if (!TM)
    return;

  // FP-truncate s64 to s32.
  LLT s32 = LLT::scalar(32);
  auto Copy0s32 = B.buildFPTrunc(s32, Copies[0]);

  // Match G_FABS.
  auto MIBFabs = B.buildInstr(TargetOpcode::G_FABS, {s32}, {Copy0s32});
  bool match =
      mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg()));
  EXPECT_TRUE(match);

  Register Src;
  auto MIBFNeg = B.buildInstr(TargetOpcode::G_FNEG, {s32}, {Copy0s32});
  match = mi_match(MIBFNeg.getReg(0), *MRI, m_GFNeg(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  match = mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  // Build and match FConstant.
  auto MIBFCst = B.buildFConstant(s32, .5);
  const ConstantFP *TmpFP{};
  match = mi_match(MIBFCst.getReg(0), *MRI, m_GFCst(TmpFP));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP);
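  // ConstantFP values are uniqued per LLVMContext, so rebuilding the same
  // constant should yield a pointer-equal ConstantFP; the pointer
  // comparisons below rely on that.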
  APFloat APF((float).5);
  auto *CFP = ConstantFP::get(Context, APF);
  EXPECT_EQ(CFP, TmpFP);

  // Build double float.
  LLT s64 = LLT::scalar(64);
  auto MIBFCst64 = B.buildFConstant(s64, .5);
  const ConstantFP *TmpFP64{};
  match = mi_match(MIBFCst64.getReg(0), *MRI, m_GFCst(TmpFP64));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP64);
  APFloat APF64(.5);
  auto CFP64 = ConstantFP::get(Context, APF64);
  EXPECT_EQ(CFP64, TmpFP64);
  EXPECT_NE(TmpFP64, TmpFP);

  // Build half float.
  LLT s16 = LLT::scalar(16);
  auto MIBFCst16 = B.buildFConstant(s16, .5);
  const ConstantFP *TmpFP16{};
  match = mi_match(MIBFCst16.getReg(0), *MRI, m_GFCst(TmpFP16));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP16);
  bool Ignored;
  APFloat APF16(.5);
  APF16.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  auto CFP16 = ConstantFP::get(Context, APF16);
  EXPECT_EQ(TmpFP16, CFP16);
  EXPECT_NE(TmpFP16, TmpFP);
}

TEST_F(AArch64GISelMITest, MatchExtendsTrunc) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);

  auto MIBTrunc = B.buildTrunc(s32, Copies[0]);
  auto MIBAExt = B.buildAnyExt(s64, MIBTrunc);
  auto MIBZExt = B.buildZExt(s64, MIBTrunc);
  auto MIBSExt = B.buildSExt(s64, MIBTrunc);
  Register Src0;
  bool match =
      mi_match(MIBTrunc.getReg(0), *MRI, m_GTrunc(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  match =
      mi_match(MIBAExt.getReg(0), *MRI, m_GAnyExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBSExt.getReg(0), *MRI, m_GSExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBZExt.getReg(0), *MRI, m_GZExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));
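  // The extend matchers check the exact opcode, so a G_ZEXT should not be
  // matched by the G_SEXT pattern; one quick cross-check of that assumption:
  EXPECT_FALSE(mi_match(MIBZExt.getReg(0), *MRI, m_GSExt(m_Reg(Src0))));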

  // Match ext(trunc src)
  match = mi_match(MIBAExt.getReg(0), *MRI,
                   m_GAnyExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBSExt.getReg(0), *MRI,
                   m_GSExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBZExt.getReg(0), *MRI,
                   m_GZExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchSpecificType) {
  setUp();
  if (!TM)
    return;

  // Try to match a 64bit add.
  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  EXPECT_FALSE(mi_match(MIBAdd.getReg(0), *MRI,
                        m_GAdd(m_SpecificType(s32), m_Reg())));
  EXPECT_TRUE(mi_match(MIBAdd.getReg(0), *MRI,
                       m_GAdd(m_SpecificType(s64), m_Reg())));

  // Try to match the destination type of a bitcast.
  LLT v2s32 = LLT::fixed_vector(2, 32);
  auto MIBCast = B.buildCast(v2s32, Copies[0]);
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_GBitcast(m_Reg())));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_SpecificType(v2s32)));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(1), *MRI, m_SpecificType(s64)));

  // Build a G_PTRTOINT and a G_INTTOPTR, then match and test them.
  LLT PtrTy = LLT::pointer(0, 64);
  auto MIBIntToPtr = B.buildCast(PtrTy, Copies[0]);
  auto MIBPtrToInt = B.buildCast(s64, MIBIntToPtr);
  Register Src0;

  // Match ptrtoint(inttoptr Reg).
  bool match = mi_match(MIBPtrToInt.getReg(0), *MRI,
                        m_GPtrToInt(m_GIntToPtr(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchCombinators) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Src0, Src1;
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s64), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  // Check for s32 (which should fail).
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_FALSE(match);
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_any_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match a case where none of the predicates hold true.
  match = mi_match(
      MIBAdd.getReg(0), *MRI,
      m_any_of(m_SpecificType(LLT::scalar(16)), m_GSub(m_Reg(), m_Reg())));
  EXPECT_FALSE(match);
}

TEST_F(AArch64GISelMITest, MatchMiscellaneous) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Reg = MIBAdd.getReg(0);

  // Only one use of Reg.
  B.buildCast(LLT::pointer(0, 32), MIBAdd);
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Add multiple debug uses of Reg.
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});

  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Multiple non-debug uses of Reg.
  B.buildCast(LLT::pointer(1, 32), MIBAdd);
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstant) {
  setUp();
  if (!TM)
    return;

  // Basic case: Can we match a G_CONSTANT with a specific value?
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(42)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(123)));

  // Test that this works inside of a more complex pattern.
  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_TRUE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(42)));

  // Wrong constant.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(123)));

  // No constant on the LHS.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(1), *MRI, m_SpecificICst(42)));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstantSplat) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
  MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);

  EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(42)));
  EXPECT_FALSE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(43)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstSplat(42)));

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});

  MachineInstrBuilder AddSplat =
      B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
  EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(42)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(43)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstSplat(42)));

  MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(Add.getReg(2), *MRI, m_SpecificICstSplat(42)));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstantOrSplat) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
  MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);

  EXPECT_TRUE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(42)));
  EXPECT_FALSE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(43)));
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstOrSplat(42)));

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});

  MachineInstrBuilder AddSplat =
      B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
  EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(43)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstOrSplat(42)));

  MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_TRUE(mi_match(Add.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
}

TEST_F(AArch64GISelMITest, MatchZeroInt) {
  setUp();
  if (!TM)
    return;
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  EXPECT_TRUE(mi_match(Zero.getReg(0), *MRI, m_ZeroInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_ZeroInt()));
}

TEST_F(AArch64GISelMITest, MatchAllOnesInt) {
  setUp();
  if (!TM)
    return;
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  EXPECT_TRUE(mi_match(AllOnes.getReg(0), *MRI, m_AllOnesInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_AllOnesInt()));
}

TEST_F(AArch64GISelMITest, MatchFPOrIntConst) {
  setUp();
  if (!TM)
    return;

  Register IntOne = B.buildConstant(LLT::scalar(64), 1).getReg(0);
  Register FPOne = B.buildFConstant(LLT::scalar(64), 1.0).getReg(0);
  Optional<ValueAndVReg> ValReg;
  Optional<FPValueAndVReg> FValReg;

  EXPECT_TRUE(mi_match(IntOne, *MRI, m_GCst(ValReg)));
  EXPECT_EQ(IntOne, ValReg->VReg);
  EXPECT_FALSE(mi_match(IntOne, *MRI, m_GFCst(FValReg)));

  EXPECT_FALSE(mi_match(FPOne, *MRI, m_GCst(ValReg)));
  EXPECT_TRUE(mi_match(FPOne, *MRI, m_GFCst(FValReg)));
  EXPECT_EQ(FPOne, FValReg->VReg);
}

TEST_F(AArch64GISelMITest, MatchConstantSplat) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, 64);

  Register FPOne = B.buildFConstant(s64, 1.0).getReg(0);
  Register FPZero = B.buildFConstant(s64, 0.0).getReg(0);
  Register Undef = B.buildUndef(s64).getReg(0);
  Optional<FPValueAndVReg> FValReg;

  // GFCstOrSplatGFCstMatch allows undef elements as part of a splat. Undefs
  // often come from widening a vector (e.g. v3s64 to v4s64) to legalize it
  // into an available operation; the padded elements are then ignored.

  EXPECT_TRUE(mi_match(FPZero, *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  EXPECT_FALSE(mi_match(Undef, *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto ZeroSplat = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPZero});
  EXPECT_TRUE(
      mi_match(ZeroSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  auto ZeroUndef = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Undef});
  EXPECT_TRUE(
      mi_match(ZeroUndef.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  // An all-undef vector is not a constant splat.
  auto UndefSplat = B.buildBuildVector(v4s64, {Undef, Undef, Undef, Undef});
  EXPECT_FALSE(
      mi_match(UndefSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto ZeroOne = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPOne});
  EXPECT_FALSE(
      mi_match(ZeroOne.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
  EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));

  auto Mixed = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Copies[0]});
  EXPECT_FALSE(
      mi_match(Mixed.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
}

TEST_F(AArch64GISelMITest, MatchNeg) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  auto NegInst = B.buildSub(s64, Zero, Copies[0]);
  Register NegatedReg;

  // Match: %neg = G_SUB 0, %Reg
  EXPECT_TRUE(mi_match(NegInst.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);

  // Don't match: %sub = G_SUB %Reg, 0
  auto NotNegInst1 = B.buildSub(s64, Copies[0], Zero);
  EXPECT_FALSE(mi_match(NotNegInst1.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Don't match: %sub = G_SUB 42, %Reg
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto NotNegInst2 = B.buildSub(s64, FortyTwo, Copies[0]);
  EXPECT_FALSE(mi_match(NotNegInst2.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Complex testcase.
  // %sub = G_SUB 0, %negated_reg
  // %add = G_ADD %x, %sub
  auto AddInst = B.buildAdd(s64, Copies[1], NegInst);
  NegatedReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchNot) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  auto NotInst1 = B.buildXor(s64, Copies[0], AllOnes);
  Register NotReg;

  // Match: G_XOR %NotReg, -1
  EXPECT_TRUE(mi_match(NotInst1.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);

  // Match: G_XOR -1, %NotReg
  auto NotInst2 = B.buildXor(s64, AllOnes, Copies[1]);
  EXPECT_TRUE(mi_match(NotInst2.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[1]);

  // Don't match: G_XOR %NotReg, 42
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto WrongCst = B.buildXor(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(WrongCst.getReg(0), *MRI, m_Not(m_Reg(NotReg))));

  // Complex testcase.
  // %xor = G_XOR %NotReg, -1
  // %add = G_ADD %x, %xor
  auto AddInst = B.buildAdd(s64, Copies[1], NotInst1);
  NotReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchSpecificReg) {
  setUp();
  if (!TM)
    return;
  auto Cst1 = B.buildConstant(LLT::scalar(64), 42);
  auto Cst2 = B.buildConstant(LLT::scalar(64), 314);
  Register Reg = Cst1.getReg(0);
  // Basic case: Same register twice.
  EXPECT_TRUE(mi_match(Reg, *MRI, m_SpecificReg(Reg)));
  // Basic case: Two explicitly different registers.
  EXPECT_FALSE(mi_match(Reg, *MRI, m_SpecificReg(Cst2.getReg(0))));
  // Check that we can tell that an instruction uses a specific register.
  auto Add = B.buildAdd(LLT::scalar(64), Cst1, Cst2);
  EXPECT_TRUE(
      mi_match(Add.getReg(0), *MRI, m_GAdd(m_SpecificReg(Reg), m_Reg())));
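  // m_GAdd matches commutatively, so the specific register should also be
  // found when it is written in the other operand position.
  EXPECT_TRUE(
      mi_match(Add.getReg(0), *MRI, m_GAdd(m_Reg(), m_SpecificReg(Reg))));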
}

} // namespace

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  initLLVM();
  return RUN_ALL_TESTS();
}