//===- PatternMatchTest.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "GISelMITest.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "gtest/gtest.h"

using namespace llvm;
using namespace MIPatternMatch;

namespace {

TEST_F(AArch64GISelMITest, MatchIntConstant) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  int64_t Cst;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_ICst(Cst));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
}

TEST_F(AArch64GISelMITest, MatchIntConstantRegister) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  std::optional<ValueAndVReg> Src0;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0->VReg, MIBCst.getReg(0));
}

TEST_F(AArch64GISelMITest, MatchIntConstantSplat) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT v2s64 = LLT::fixed_vector(2, s64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
  int64_t Cst;
  EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
  EXPECT_EQ(Cst, 42);

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
  EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));

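  // m_ICstOrSplat should also look through G_CONCAT_VECTORS of constant
  // splat build vectors.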
  auto ICst = B.buildConstant(s64, 15).getReg(0);
  auto SmallSplat = B.buildBuildVector(v2s64, {ICst, ICst}).getReg(0);
  auto LargeSplat = B.buildConcatVectors(v4s64, {SmallSplat, SmallSplat});
  EXPECT_TRUE(mi_match(LargeSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
}

TEST_F(AArch64GISelMITest, MachineInstrPtrBind) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto MIBAdd = B.buildAdd(LLT::scalar(64), Copies[0], Copies[1]);
  // Test 'MachineInstr *' bind.
  // Default mi_match.
  MachineInstr *MIPtr = MIBAdd.getInstr();
  bool match = mi_match(MIPtr, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // Specialized mi_match for MachineInstr &.
  MachineInstr &MI = *MIBAdd.getInstr();
  match = mi_match(MI, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // MachineInstrBuilder has automatic conversion to MachineInstr *.
  match = mi_match(MIBAdd, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // Match instruction without def.
  auto MIBBrcond = B.buildBrCond(Copies[0], B.getMBB());
  MachineInstr *MatchedMI;
  match = mi_match(MIBBrcond, *MRI, m_MInstr(MatchedMI));
  EXPECT_TRUE(match);
  EXPECT_TRUE(MIBBrcond.getInstr() == MatchedMI);
  // Match instruction with two defs.
  auto MIBUAddO =
      B.buildUAddo(LLT::scalar(64), LLT::scalar(1), Copies[0], Copies[1]);
  match = mi_match(MIBUAddO, *MRI, m_MInstr(MatchedMI));
  EXPECT_TRUE(match);
  EXPECT_TRUE(MIBUAddO.getInstr() == MatchedMI);
}

TEST_F(AArch64GISelMITest, MatchBinaryOp) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  LLT s32 = LLT::scalar(32);
  LLT s64 = LLT::scalar(64);
  LLT p0 = LLT::pointer(0, 64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  // Test case for no bind.
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  Register Src0, Src1, Src2;
  match = mi_match(MIBAdd.getReg(0), *MRI,
                   m_GAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build MUL(ADD(%0, %1), %2).
  auto MIBMul = B.buildMul(s64, MIBAdd, Copies[2]);

  // Try to match MUL.
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBAdd.getReg(0));
  EXPECT_EQ(Src1, Copies[2]);

  // Try to match MUL(ADD).
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_GAdd(m_Reg(Src0), m_Reg(Src1)), m_Reg(Src2)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  EXPECT_EQ(Src2, Copies[2]);

  // Test commutativity.
  auto MIBMul2 = B.buildMul(s64, Copies[0], B.buildConstant(s64, 42));
  // Try to match MUL(Cst, Reg) on the src of MUL(Reg, Cst) to validate
  // commutativity.
  int64_t Cst;
  match = mi_match(MIBMul2.getReg(0), *MRI,
                   m_GMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // Make sure commutative matching doesn't apply to a non-commutative op
  // like SUB.
  auto MIBSub = B.buildSub(s64, Copies[0], B.buildConstant(s64, 42));
  match = mi_match(MIBSub.getReg(0), *MRI,
                   m_GSub(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  auto MIBFMul = B.buildInstr(TargetOpcode::G_FMUL, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  // Match and test commutativity for FMUL.
  match = mi_match(MIBFMul.getReg(0), *MRI,
                   m_GFMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // FSUB
  auto MIBFSub = B.buildInstr(TargetOpcode::G_FSUB, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  match = mi_match(MIBFSub.getReg(0), *MRI,
                   m_GFSub(m_Reg(Src0), m_Reg()));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  // Build AND %0, %1
  auto MIBAnd = B.buildAnd(s64, Copies[0], Copies[1]);
  // Try to match AND.
  match = mi_match(MIBAnd.getReg(0), *MRI,
                   m_GAnd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build OR %0, %1
  auto MIBOr = B.buildOr(s64, Copies[0], Copies[1]);
  // Try to match OR.
  match = mi_match(MIBOr.getReg(0), *MRI,
                   m_GOr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match lshr, and make sure a different shift amount type works.
  auto TruncCopy1 = B.buildTrunc(s32, Copies[1]);
  auto LShr = B.buildLShr(s64, Copies[0], TruncCopy1);
  match = mi_match(LShr.getReg(0), *MRI,
                   m_GLShr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Match shl, and make sure a different shift amount type works.
  auto Shl = B.buildShl(s64, Copies[0], TruncCopy1);
  match = mi_match(Shl.getReg(0), *MRI,
                   m_GShl(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Build a G_PTR_ADD and check that we can match it.
  auto PtrAdd = B.buildPtrAdd(p0, {B.buildUndef(p0)}, Copies[0]);
  match = mi_match(PtrAdd.getReg(0), *MRI, m_GPtrAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, PtrAdd->getOperand(1).getReg());
  EXPECT_EQ(Src1, Copies[0]);

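  // Constants and an unmerge used by the opcode-based m_BinOp /
  // m_CommutativeBinOp checks below.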
  auto MIBCst = B.buildConstant(s64, 42);
  auto MIBAddCst = B.buildAdd(s64, MIBCst, Copies[0]);
  auto MIBUnmerge = B.buildUnmerge({s32, s32}, B.buildConstant(s64, 42));

  // Match min/max, and make sure they're commutative.
  auto SMin = B.buildSMin(s64, Copies[2], MIBAdd);
  EXPECT_TRUE(mi_match(SMin.getReg(0), *MRI,
                       m_GSMin(m_GAdd(m_Reg(Src1), m_Reg(Src2)), m_Reg(Src0))));
  EXPECT_EQ(Src0, Copies[2]);
  EXPECT_EQ(Src1, Copies[0]);
  EXPECT_EQ(Src2, Copies[1]);
  auto SMax = B.buildSMax(s64, Copies[2], MIBAdd);
  EXPECT_TRUE(mi_match(SMax.getReg(0), *MRI,
                       m_GSMax(m_GAdd(m_Reg(Src1), m_Reg(Src2)), m_Reg(Src0))));
  EXPECT_EQ(Src0, Copies[2]);
  EXPECT_EQ(Src1, Copies[0]);
  EXPECT_EQ(Src2, Copies[1]);
  auto UMin = B.buildUMin(s64, Copies[2], MIBAdd);
  EXPECT_TRUE(mi_match(UMin.getReg(0), *MRI,
                       m_GUMin(m_GAdd(m_Reg(Src1), m_Reg(Src2)), m_Reg(Src0))));
  EXPECT_EQ(Src0, Copies[2]);
  EXPECT_EQ(Src1, Copies[0]);
  EXPECT_EQ(Src2, Copies[1]);
  auto UMax = B.buildUMax(s64, Copies[2], MIBAdd);
  EXPECT_TRUE(mi_match(UMax.getReg(0), *MRI,
                       m_GUMax(m_GAdd(m_Reg(Src1), m_Reg(Src2)), m_Reg(Src0))));
  EXPECT_EQ(Src0, Copies[2]);
  EXPECT_EQ(Src1, Copies[0]);
  EXPECT_EQ(Src2, Copies[1]);

  // m_BinOp with opcode.
  // Match a binary instruction, its opcode, and its operands in the given
  // (non-commuted) order.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  // Opcode doesn't match.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
  EXPECT_FALSE(match);

  // Instruction is not binary.
  match = mi_match(MIBCst, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
  match = mi_match(MIBUnmerge, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);

  // m_CommutativeBinOp with opcode.
  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  match = mi_match(
      MIBCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
  match = mi_match(
      MIBUnmerge, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
}

TEST_F(AArch64GISelMITest, MatchICmp) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildICmp(CmpInst::ICMP_EQ, s1, Copies[0], Copies[1]);

  // Check matching any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GICmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GICmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::ICMP_EQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchFCmp) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildFCmp(CmpInst::FCMP_OEQ, s1, Copies[0], Copies[1]);

  // Check matching any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GFCmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GFCmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::FCMP_OEQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchCommutativeICmp) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  const LLT s1 = LLT::scalar(1);
  Register LHS = Copies[0];
  Register RHS = Copies[1];
  CmpInst::Predicate MatchedPred;
  bool Match = false;
  for (unsigned P = CmpInst::Predicate::FIRST_ICMP_PREDICATE;
       P < CmpInst::Predicate::LAST_ICMP_PREDICATE; ++P) {
    auto CurrPred = static_cast<CmpInst::Predicate>(P);
    auto Cmp = B.buildICmp(CurrPred, s1, LHS, RHS);
    // Basic matching.
    Match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GICmp(m_Pred(MatchedPred), m_SpecificReg(LHS), m_SpecificReg(RHS)));
    EXPECT_TRUE(Match);
    EXPECT_EQ(MatchedPred, CurrPred);
    // Commuting operands should still match, but the predicate should be
    // swapped.
    Match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GICmp(m_Pred(MatchedPred), m_SpecificReg(RHS), m_SpecificReg(LHS)));
    EXPECT_TRUE(Match);
    EXPECT_EQ(MatchedPred, CmpInst::getSwappedPredicate(CurrPred));
  }
}

TEST_F(AArch64GISelMITest, MatchCommutativeFCmp) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  const LLT s1 = LLT::scalar(1);
  Register LHS = Copies[0];
  Register RHS = Copies[1];
  CmpInst::Predicate MatchedPred;
  bool Match = false;
  for (unsigned P = CmpInst::Predicate::FIRST_FCMP_PREDICATE;
       P < CmpInst::Predicate::LAST_FCMP_PREDICATE; ++P) {
    auto CurrPred = static_cast<CmpInst::Predicate>(P);
    auto Cmp = B.buildFCmp(CurrPred, s1, LHS, RHS);
    // Basic matching.
    Match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GFCmp(m_Pred(MatchedPred), m_SpecificReg(LHS), m_SpecificReg(RHS)));
    EXPECT_TRUE(Match);
    EXPECT_EQ(MatchedPred, CurrPred);
    // Commuting operands should still match, but the predicate should be
    // swapped.
    Match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GFCmp(m_Pred(MatchedPred), m_SpecificReg(RHS), m_SpecificReg(LHS)));
    EXPECT_TRUE(Match);
    EXPECT_EQ(MatchedPred, CmpInst::getSwappedPredicate(CurrPred));
  }
}

TEST_F(AArch64GISelMITest, MatchFPUnaryOp) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  // FPTrunc s64 to s32.
  LLT s32 = LLT::scalar(32);
  auto Copy0s32 = B.buildFPTrunc(s32, Copies[0]);

  // Match G_FABS.
  auto MIBFabs = B.buildInstr(TargetOpcode::G_FABS, {s32}, {Copy0s32});
  bool match =
      mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg()));
  EXPECT_TRUE(match);

  Register Src;
  auto MIBFNeg = B.buildInstr(TargetOpcode::G_FNEG, {s32}, {Copy0s32});
  match = mi_match(MIBFNeg.getReg(0), *MRI, m_GFNeg(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  match = mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  // Build and match FConstant.
  auto MIBFCst = B.buildFConstant(s32, .5);
  const ConstantFP *TmpFP{};
  match = mi_match(MIBFCst.getReg(0), *MRI, m_GFCst(TmpFP));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP);
  APFloat APF((float).5);
  auto *CFP = ConstantFP::get(Context, APF);
  EXPECT_EQ(CFP, TmpFP);

  // Build a double-precision constant.
  LLT s64 = LLT::scalar(64);
  auto MIBFCst64 = B.buildFConstant(s64, .5);
  const ConstantFP *TmpFP64{};
  match = mi_match(MIBFCst64.getReg(0), *MRI, m_GFCst(TmpFP64));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP64);
  APFloat APF64(.5);
  auto CFP64 = ConstantFP::get(Context, APF64);
  EXPECT_EQ(CFP64, TmpFP64);
  EXPECT_NE(TmpFP64, TmpFP);

  // Build a half-precision constant.
  LLT s16 = LLT::scalar(16);
  auto MIBFCst16 = B.buildFConstant(s16, .5);
  const ConstantFP *TmpFP16{};
  match = mi_match(MIBFCst16.getReg(0), *MRI, m_GFCst(TmpFP16));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP16);
  bool Ignored;
  APFloat APF16(.5);
  APF16.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  auto CFP16 = ConstantFP::get(Context, APF16);
  EXPECT_EQ(TmpFP16, CFP16);
  EXPECT_NE(TmpFP16, TmpFP);
}

TEST_F(AArch64GISelMITest, MatchExtendsTrunc) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);

  auto MIBTrunc = B.buildTrunc(s32, Copies[0]);
  auto MIBAExt = B.buildAnyExt(s64, MIBTrunc);
  auto MIBZExt = B.buildZExt(s64, MIBTrunc);
  auto MIBSExt = B.buildSExt(s64, MIBTrunc);
  Register Src0;
  bool match =
      mi_match(MIBTrunc.getReg(0), *MRI, m_GTrunc(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  match =
      mi_match(MIBAExt.getReg(0), *MRI, m_GAnyExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBSExt.getReg(0), *MRI, m_GSExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBZExt.getReg(0), *MRI, m_GZExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  // Match ext(trunc src)
  match = mi_match(MIBAExt.getReg(0), *MRI,
                   m_GAnyExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBSExt.getReg(0), *MRI,
                   m_GSExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBZExt.getReg(0), *MRI,
                   m_GZExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchSpecificType) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  // Try to match a 64-bit add.
  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  EXPECT_FALSE(mi_match(MIBAdd.getReg(0), *MRI,
                        m_GAdd(m_SpecificType(s32), m_Reg())));
  EXPECT_TRUE(mi_match(MIBAdd.getReg(0), *MRI,
                       m_GAdd(m_SpecificType(s64), m_Reg())));

  // Try to match the destination type of a bitcast.
  LLT v2s32 = LLT::fixed_vector(2, 32);
  auto MIBCast = B.buildCast(v2s32, Copies[0]);
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_GBitcast(m_Reg())));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_SpecificType(v2s32)));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(1), *MRI, m_SpecificType(s64)));

  // Build a G_INTTOPTR and a G_PTRTOINT and match them.
  LLT PtrTy = LLT::pointer(0, 64);
  auto MIBIntToPtr = B.buildCast(PtrTy, Copies[0]);
  auto MIBPtrToInt = B.buildCast(s64, MIBIntToPtr);
  Register Src0;

  // Match the ptrtoint(inttoptr Reg) chain.
  bool match = mi_match(MIBPtrToInt.getReg(0), *MRI,
                        m_GPtrToInt(m_GIntToPtr(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchCombinators) {
  setUp();
  if (!TM)
    GTEST_SKIP();

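  // m_all_of requires every sub-pattern to match; m_any_of requires at least
  // one.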
  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Src0, Src1;
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s64), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  // Check for s32 (which should fail).
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_FALSE(match);
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_any_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match a case where none of the predicates hold true.
  match = mi_match(
      MIBAdd.getReg(0), *MRI,
      m_any_of(m_SpecificType(LLT::scalar(16)), m_GSub(m_Reg(), m_Reg())));
  EXPECT_FALSE(match);
}

TEST_F(AArch64GISelMITest, MatchMiscellaneous) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Reg = MIBAdd.getReg(0);

  // Extract the type.
  LLT Ty;
  EXPECT_TRUE(mi_match(Reg, *MRI, m_GAdd(m_Type(Ty), m_Reg())));
  EXPECT_EQ(Ty, s64);

  // Only one use of Reg.
  B.buildCast(LLT::pointer(0, 32), MIBAdd);
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Add multiple debug uses of Reg.
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});

  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Multiple non-debug uses of Reg.
  B.buildCast(LLT::pointer(1, 32), MIBAdd);
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstant) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  // Basic case: Can we match a G_CONSTANT with a specific value?
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(42)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(123)));

  // Test that this works inside a more complex pattern.
  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], FortyTwo);
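  // Operand 0 of the G_ADD is its def; operands 1 and 2 are the sources.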
  EXPECT_TRUE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(42)));

  // Wrong constant.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(123)));

  // No constant on the LHS.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(1), *MRI, m_SpecificICst(42)));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstantSplat) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
  MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);

  EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(42)));
  EXPECT_FALSE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(43)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstSplat(42)));

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});

  MachineInstrBuilder AddSplat =
      B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
  EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(42)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(43)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstSplat(42)));

  MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(Add.getReg(2), *MRI, m_SpecificICstSplat(42)));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstantOrSplat) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
  MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);

  EXPECT_TRUE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(42)));
  EXPECT_FALSE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(43)));
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstOrSplat(42)));

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});

  MachineInstrBuilder AddSplat =
      B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
  EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(43)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstOrSplat(42)));

  MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_TRUE(mi_match(Add.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
}

TEST_F(AArch64GISelMITest, MatchZeroInt) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  EXPECT_TRUE(mi_match(Zero.getReg(0), *MRI, m_ZeroInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_ZeroInt()));
}

TEST_F(AArch64GISelMITest, MatchAllOnesInt) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  EXPECT_TRUE(mi_match(AllOnes.getReg(0), *MRI, m_AllOnesInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_AllOnesInt()));
}

TEST_F(AArch64GISelMITest, MatchFPOrIntConst) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  Register IntOne = B.buildConstant(LLT::scalar(64), 1).getReg(0);
  Register FPOne = B.buildFConstant(LLT::scalar(64), 1.0).getReg(0);
  std::optional<ValueAndVReg> ValReg;
  std::optional<FPValueAndVReg> FValReg;

  EXPECT_TRUE(mi_match(IntOne, *MRI, m_GCst(ValReg)));
  EXPECT_EQ(IntOne, ValReg->VReg);
  EXPECT_FALSE(mi_match(IntOne, *MRI, m_GFCst(FValReg)));

  EXPECT_FALSE(mi_match(FPOne, *MRI, m_GCst(ValReg)));
  EXPECT_TRUE(mi_match(FPOne, *MRI, m_GFCst(FValReg)));
  EXPECT_EQ(FPOne, FValReg->VReg);
}

TEST_F(AArch64GISelMITest, MatchConstantSplat) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT v2s64 = LLT::fixed_vector(2, 64);
  LLT v4s64 = LLT::fixed_vector(4, 64);

  Register FPOne = B.buildFConstant(s64, 1.0).getReg(0);
  Register FPZero = B.buildFConstant(s64, 0.0).getReg(0);
  Register Undef = B.buildUndef(s64).getReg(0);
  std::optional<FPValueAndVReg> FValReg;

  // GFCstOrSplatGFCstMatch allows undef elements as part of a splat. Undefs
  // often come from padding a vector to a legal type (e.g. v3s64 to v4s64),
  // where the added elements are meant to be ignored.

  EXPECT_TRUE(mi_match(FPZero, *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  EXPECT_FALSE(mi_match(Undef, *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto ZeroSplat = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPZero});
  EXPECT_TRUE(
      mi_match(ZeroSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  auto ZeroUndef = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Undef});
  EXPECT_TRUE(
      mi_match(ZeroUndef.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  // A vector of all undefs is not a constant splat.
  auto UndefSplat = B.buildBuildVector(v4s64, {Undef, Undef, Undef, Undef});
  EXPECT_FALSE(
      mi_match(UndefSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto ZeroOne = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPOne});
  EXPECT_FALSE(
      mi_match(ZeroOne.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
  EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));

  auto Mixed = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Copies[0]});
  EXPECT_FALSE(
      mi_match(Mixed.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  // Look through G_CONCAT_VECTORS.
  auto SmallZeroSplat = B.buildBuildVector(v2s64, {FPZero, FPZero}).getReg(0);
  auto LargeZeroSplat =
      B.buildConcatVectors(v4s64, {SmallZeroSplat, SmallZeroSplat});
  EXPECT_TRUE(mi_match(LargeZeroSplat.getReg(0), *MRI,
                       GFCstOrSplatGFCstMatch(FValReg)));

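  // The splat check should also look through a COPY feeding the
  // G_CONCAT_VECTORS.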
  auto SmallZeroSplat2 = B.buildBuildVector(v2s64, {FPZero, FPZero}).getReg(0);
  auto SmallZeroSplat3 = B.buildCopy(v2s64, SmallZeroSplat).getReg(0);
  auto LargeZeroSplat2 =
      B.buildConcatVectors(v4s64, {SmallZeroSplat2, SmallZeroSplat3});
  EXPECT_TRUE(mi_match(LargeZeroSplat2.getReg(0), *MRI,
                       GFCstOrSplatGFCstMatch(FValReg)));

  // Not all G_CONCAT_VECTORS are splats.
  auto SmallOneSplat = B.buildBuildVector(v2s64, {FPOne, FPOne}).getReg(0);
  auto LargeMixedSplat =
      B.buildConcatVectors(v4s64, {SmallZeroSplat, SmallOneSplat});
  EXPECT_FALSE(mi_match(LargeMixedSplat.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));

  auto SmallMixedSplat = B.buildBuildVector(v2s64, {FPOne, FPZero}).getReg(0);
  auto LargeSplat =
      B.buildConcatVectors(v4s64, {SmallMixedSplat, SmallMixedSplat});
  EXPECT_FALSE(
      mi_match(LargeSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto SmallUndefSplat = B.buildBuildVector(v2s64, {Undef, Undef}).getReg(0);
  auto LargeUndefSplat =
      B.buildConcatVectors(v4s64, {SmallUndefSplat, SmallUndefSplat});
  EXPECT_FALSE(mi_match(LargeUndefSplat.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));

  auto UndefVec = B.buildUndef(v2s64).getReg(0);
  auto LargeUndefSplat2 = B.buildConcatVectors(v4s64, {UndefVec, UndefVec});
  EXPECT_FALSE(mi_match(LargeUndefSplat2.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));
}

TEST_F(AArch64GISelMITest, MatchNeg) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  auto NegInst = B.buildSub(s64, Zero, Copies[0]);
  Register NegatedReg;

  // Match: G_SUB 0, %Reg
  EXPECT_TRUE(mi_match(NegInst.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);

  // Don't match: G_SUB %Reg, 0
  auto NotNegInst1 = B.buildSub(s64, Copies[0], Zero);
  EXPECT_FALSE(mi_match(NotNegInst1.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Don't match: G_SUB 42, %Reg
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto NotNegInst2 = B.buildSub(s64, FortyTwo, Copies[0]);
  EXPECT_FALSE(mi_match(NotNegInst2.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Complex testcase.
  // %sub = G_SUB 0, %negated_reg
  // %add = G_ADD %x, %sub
  auto AddInst = B.buildAdd(s64, Copies[1], NegInst);
  NegatedReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchNot) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  auto NotInst1 = B.buildXor(s64, Copies[0], AllOnes);
  Register NotReg;

  // Match: G_XOR %NotReg, -1
  EXPECT_TRUE(mi_match(NotInst1.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);

  // Match: G_XOR -1, %NotReg
  auto NotInst2 = B.buildXor(s64, AllOnes, Copies[1]);
  EXPECT_TRUE(mi_match(NotInst2.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[1]);

  // Don't match: G_XOR %NotReg, 42
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto WrongCst = B.buildXor(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(WrongCst.getReg(0), *MRI, m_Not(m_Reg(NotReg))));

  // Complex testcase.
  // %xor = G_XOR %NotReg, -1
  // %add = G_ADD %x, %xor
  auto AddInst = B.buildAdd(s64, Copies[1], NotInst1);
  NotReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchSpecificReg) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto Cst1 = B.buildConstant(LLT::scalar(64), 42);
  auto Cst2 = B.buildConstant(LLT::scalar(64), 314);
  Register Reg = Cst1.getReg(0);
  // Basic case: Same register twice.
  EXPECT_TRUE(mi_match(Reg, *MRI, m_SpecificReg(Reg)));
  // Basic case: Two explicitly different registers.
  EXPECT_FALSE(mi_match(Reg, *MRI, m_SpecificReg(Cst2.getReg(0))));
  // Check that we can tell that an instruction uses a specific register.
  auto Add = B.buildAdd(LLT::scalar(64), Cst1, Cst2);
  EXPECT_TRUE(
      mi_match(Add.getReg(0), *MRI, m_GAdd(m_SpecificReg(Reg), m_Reg())));
}

TEST_F(AArch64GISelMITest, DeferredMatching) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto s64 = LLT::scalar(64);
  auto s32 = LLT::scalar(32);

  auto Cst1 = B.buildConstant(s64, 42);
  auto Cst2 = B.buildConstant(s64, 314);
  auto Add = B.buildAdd(s64, Cst1, Cst2);
  auto Sub = B.buildSub(s64, Add, Cst1);

  auto TruncAdd = B.buildTrunc(s32, Add);
  auto TruncSub = B.buildTrunc(s32, Sub);
  auto NarrowAdd = B.buildAdd(s32, TruncAdd, TruncSub);

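  // m_DeferredReg/m_DeferredType match only if the operand equals the value
  // bound by an earlier sub-pattern in the same mi_match call.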
  Register X;
  EXPECT_TRUE(mi_match(Sub.getReg(0), *MRI,
                       m_GSub(m_GAdd(m_Reg(X), m_Reg()), m_DeferredReg(X))));
  LLT Ty;
  EXPECT_TRUE(
      mi_match(NarrowAdd.getReg(0), *MRI,
               m_GAdd(m_GTrunc(m_Type(Ty)), m_GTrunc(m_DeferredType(Ty)))));

  // Test commutative matching combined with a deferred operand.
  auto Add2 = B.buildAdd(s64, Sub, Cst1);
  EXPECT_TRUE(mi_match(Add2.getReg(0), *MRI,
                       m_GAdd(m_Reg(X), m_GSub(m_Reg(), m_DeferredReg(X)))));
}

} // namespace

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  initLLVM();
  return RUN_ALL_TESTS();
}