//===- PatternMatchTest.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "GISelMITest.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "gtest/gtest.h"

using namespace llvm;
using namespace MIPatternMatch;

namespace {

TEST_F(AArch64GISelMITest, MatchIntConstant) {
  setUp();
  if (!TM)
    return;
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  int64_t Cst;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_ICst(Cst));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
}

TEST_F(AArch64GISelMITest, MatchIntConstantRegister) {
  setUp();
  if (!TM)
    return;
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  Optional<ValueAndVReg> Src0;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0->VReg, MIBCst.getReg(0));
}

TEST_F(AArch64GISelMITest, MatchIntConstantSplat) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
  int64_t Cst;
  EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
  EXPECT_EQ(Cst, 42);

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
  EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
}

TEST_F(AArch64GISelMITest, MachineInstrPtrBind) {
  setUp();
  if (!TM)
    return;
  auto MIBAdd = B.buildAdd(LLT::scalar(64), Copies[0], Copies[1]);
  // Test 'MachineInstr *' bind.
  // Default mi_match.
  MachineInstr *MIPtr = MIBAdd.getInstr();
  bool match = mi_match(MIPtr, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // Specialized mi_match for MachineInstr &.
  MachineInstr &MI = *MIBAdd.getInstr();
  match = mi_match(MI, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // MachineInstrBuilder has automatic conversion to MachineInstr *.
  match = mi_match(MIBAdd, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // Match instruction without def.
  auto MIBBrcond = B.buildBrCond(Copies[0], B.getMBB());
  MachineInstr *MatchedMI;
  match = mi_match(MIBBrcond, *MRI, m_MInstr(MatchedMI));
  EXPECT_TRUE(match);
  EXPECT_TRUE(MIBBrcond.getInstr() == MatchedMI);
  // Match instruction with two defs.
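  // (G_UADDO is a good case for this: it defines both the sum and the
  // carry-out, so m_MInstr must bind instructions whose result is not a
  // single register.)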
  auto MIBUAddO =
      B.buildUAddo(LLT::scalar(64), LLT::scalar(1), Copies[0], Copies[1]);
  match = mi_match(MIBUAddO, *MRI, m_MInstr(MatchedMI));
  EXPECT_TRUE(match);
  EXPECT_TRUE(MIBUAddO.getInstr() == MatchedMI);
}

TEST_F(AArch64GISelMITest, MatchBinaryOp) {
  setUp();
  if (!TM)
    return;
  LLT s32 = LLT::scalar(32);
  LLT s64 = LLT::scalar(64);
  LLT p0 = LLT::pointer(0, 64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  // Test case for no bind.
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  Register Src0, Src1, Src2;
  match = mi_match(MIBAdd.getReg(0), *MRI,
                   m_GAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build MUL(ADD %0, %1), %2
  auto MIBMul = B.buildMul(s64, MIBAdd, Copies[2]);

  // Try to match MUL.
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBAdd.getReg(0));
  EXPECT_EQ(Src1, Copies[2]);

  // Try to match MUL(ADD).
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_GAdd(m_Reg(Src0), m_Reg(Src1)), m_Reg(Src2)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  EXPECT_EQ(Src2, Copies[2]);

  // Test commutativity.
  auto MIBMul2 = B.buildMul(s64, Copies[0], B.buildConstant(s64, 42));
  // Try to match MUL(Cst, Reg) on src of MUL(Reg, Cst) to validate
  // commutativity.
  int64_t Cst;
  match = mi_match(MIBMul2.getReg(0), *MRI,
                   m_GMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // Make sure commutative matching does not apply to a non-commutative
  // opcode like SUB.
  auto MIBSub = B.buildSub(s64, Copies[0], B.buildConstant(s64, 42));
  match = mi_match(MIBSub.getReg(0), *MRI,
                   m_GSub(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  auto MIBFMul = B.buildInstr(TargetOpcode::G_FMUL, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  // Match and test commutativity for FMUL.
  match = mi_match(MIBFMul.getReg(0), *MRI,
                   m_GFMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // FSUB
  auto MIBFSub = B.buildInstr(TargetOpcode::G_FSUB, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  match = mi_match(MIBFSub.getReg(0), *MRI,
                   m_GFSub(m_Reg(Src0), m_Reg()));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  // Build AND %0, %1.
  auto MIBAnd = B.buildAnd(s64, Copies[0], Copies[1]);
  // Try to match AND.
  match = mi_match(MIBAnd.getReg(0), *MRI,
                   m_GAnd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build OR %0, %1.
  auto MIBOr = B.buildOr(s64, Copies[0], Copies[1]);
  // Try to match OR.
  match = mi_match(MIBOr.getReg(0), *MRI,
                   m_GOr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match lshr, and make sure a different shift amount type works.
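  // (The G_* shift opcodes allow the shift amount to have a different scalar
  // type than the value being shifted; here the amount is s32 and the value
  // is s64.)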
  auto TruncCopy1 = B.buildTrunc(s32, Copies[1]);
  auto LShr = B.buildLShr(s64, Copies[0], TruncCopy1);
  match = mi_match(LShr.getReg(0), *MRI,
                   m_GLShr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Match shl, and make sure a different shift amount type works.
  auto Shl = B.buildShl(s64, Copies[0], TruncCopy1);
  match = mi_match(Shl.getReg(0), *MRI,
                   m_GShl(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Build a G_PTR_ADD and check that we can match it.
  auto PtrAdd = B.buildPtrAdd(p0, {B.buildUndef(p0)}, Copies[0]);
  match = mi_match(PtrAdd.getReg(0), *MRI, m_GPtrAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, PtrAdd->getOperand(1).getReg());
  EXPECT_EQ(Src1, Copies[0]);

  auto MIBCst = B.buildConstant(s64, 42);
  auto MIBAddCst = B.buildAdd(s64, MIBCst, Copies[0]);
  auto MIBUnmerge = B.buildUnmerge({s32, s32}, B.buildConstant(s64, 42));

  // m_BinOp with opcode.
  // Match a binary instruction by opcode, with operands in the given
  // (non-commutative) order.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  // Opcode doesn't match.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
  EXPECT_FALSE(match);

  // Instruction is not binary.
  match = mi_match(MIBCst, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
  match = mi_match(MIBUnmerge, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);

  // m_CommutativeBinOp with opcode.
  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  match = mi_match(
      MIBCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
  match = mi_match(
      MIBUnmerge, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
}

TEST_F(AArch64GISelMITest, MatchICmp) {
  setUp();
  if (!TM)
    return;

  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildICmp(CmpInst::ICMP_EQ, s1, Copies[0], Copies[1]);

  // Check match any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GICmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check we get the predicate and registers.
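  // (m_Pred(Pred) binds the comparison predicate, just as m_Reg(Reg0) binds
  // a register.)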
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GICmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::ICMP_EQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchFCmp) {
  setUp();
  if (!TM)
    return;

  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildFCmp(CmpInst::FCMP_OEQ, s1, Copies[0], Copies[1]);

  // Check match any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GFCmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GFCmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::FCMP_OEQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchFPUnaryOp) {
  setUp();
  if (!TM)
    return;

  // Truncate s64 to s32.
  LLT s32 = LLT::scalar(32);
  auto Copy0s32 = B.buildFPTrunc(s32, Copies[0]);

  // Match G_FABS.
  auto MIBFabs = B.buildInstr(TargetOpcode::G_FABS, {s32}, {Copy0s32});
  bool match =
      mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg()));
  EXPECT_TRUE(match);

  Register Src;
  auto MIBFNeg = B.buildInstr(TargetOpcode::G_FNEG, {s32}, {Copy0s32});
  match = mi_match(MIBFNeg.getReg(0), *MRI, m_GFNeg(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  match = mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  // Build and match FConstant.
  auto MIBFCst = B.buildFConstant(s32, .5);
  const ConstantFP *TmpFP{};
  match = mi_match(MIBFCst.getReg(0), *MRI, m_GFCst(TmpFP));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP);
  APFloat APF((float).5);
  auto *CFP = ConstantFP::get(Context, APF);
  EXPECT_EQ(CFP, TmpFP);

  // Build a double FConstant.
  LLT s64 = LLT::scalar(64);
  auto MIBFCst64 = B.buildFConstant(s64, .5);
  const ConstantFP *TmpFP64{};
  match = mi_match(MIBFCst64.getReg(0), *MRI, m_GFCst(TmpFP64));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP64);
  APFloat APF64(.5);
  auto CFP64 = ConstantFP::get(Context, APF64);
  EXPECT_EQ(CFP64, TmpFP64);
  EXPECT_NE(TmpFP64, TmpFP);

  // Build a half FConstant.
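  // (APFloat(.5) carries IEEEdouble semantics, so it has to be converted to
  // IEEEhalf before it can be compared against the s16 constant below.)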
  LLT s16 = LLT::scalar(16);
  auto MIBFCst16 = B.buildFConstant(s16, .5);
  const ConstantFP *TmpFP16{};
  match = mi_match(MIBFCst16.getReg(0), *MRI, m_GFCst(TmpFP16));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP16);
  bool Ignored;
  APFloat APF16(.5);
  APF16.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  auto CFP16 = ConstantFP::get(Context, APF16);
  EXPECT_EQ(TmpFP16, CFP16);
  EXPECT_NE(TmpFP16, TmpFP);
}

TEST_F(AArch64GISelMITest, MatchExtendsTrunc) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);

  auto MIBTrunc = B.buildTrunc(s32, Copies[0]);
  auto MIBAExt = B.buildAnyExt(s64, MIBTrunc);
  auto MIBZExt = B.buildZExt(s64, MIBTrunc);
  auto MIBSExt = B.buildSExt(s64, MIBTrunc);
  Register Src0;
  bool match =
      mi_match(MIBTrunc.getReg(0), *MRI, m_GTrunc(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  match =
      mi_match(MIBAExt.getReg(0), *MRI, m_GAnyExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBSExt.getReg(0), *MRI, m_GSExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBZExt.getReg(0), *MRI, m_GZExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  // Match ext(trunc src).
  match = mi_match(MIBAExt.getReg(0), *MRI,
                   m_GAnyExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBSExt.getReg(0), *MRI,
                   m_GSExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBZExt.getReg(0), *MRI,
                   m_GZExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchSpecificType) {
  setUp();
  if (!TM)
    return;

  // Try to match a 64-bit add.
  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  EXPECT_FALSE(mi_match(MIBAdd.getReg(0), *MRI,
                        m_GAdd(m_SpecificType(s32), m_Reg())));
  EXPECT_TRUE(mi_match(MIBAdd.getReg(0), *MRI,
                       m_GAdd(m_SpecificType(s64), m_Reg())));

  // Try to match the destination type of a bitcast.
  LLT v2s32 = LLT::fixed_vector(2, 32);
  auto MIBCast = B.buildCast(v2s32, Copies[0]);
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_GBitcast(m_Reg())));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_SpecificType(v2s32)));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(1), *MRI, m_SpecificType(s64)));

  // Build a G_PTRTOINT and G_INTTOPTR, then match and test them.
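  // (buildCast picks the opcode from the types: scalar -> pointer becomes
  // G_INTTOPTR, and pointer -> scalar becomes G_PTRTOINT.)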
  LLT PtrTy = LLT::pointer(0, 64);
  auto MIBIntToPtr = B.buildCast(PtrTy, Copies[0]);
  auto MIBPtrToInt = B.buildCast(s64, MIBIntToPtr);
  Register Src0;

  // Match the ptrtoint(inttoptr reg).
  bool match = mi_match(MIBPtrToInt.getReg(0), *MRI,
                        m_GPtrToInt(m_GIntToPtr(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchCombinators) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Src0, Src1;
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s64), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  // Check for s32 (which should fail).
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_FALSE(match);
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_any_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match a case where none of the predicates hold true.
  match = mi_match(
      MIBAdd.getReg(0), *MRI,
      m_any_of(m_SpecificType(LLT::scalar(16)), m_GSub(m_Reg(), m_Reg())));
  EXPECT_FALSE(match);
}

TEST_F(AArch64GISelMITest, MatchMiscellaneous) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Reg = MIBAdd.getReg(0);

  // Only one use of Reg.
  B.buildCast(LLT::pointer(0, 32), MIBAdd);
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Add multiple debug uses of Reg.
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});

  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Multiple non-debug uses of Reg.
  B.buildCast(LLT::pointer(1, 32), MIBAdd);
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstant) {
  setUp();
  if (!TM)
    return;

  // Basic case: Can we match a G_CONSTANT with a specific value?
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(42)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(123)));

  // Test that this works inside of a more complex pattern.
  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_TRUE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(42)));

  // Wrong constant.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(123)));

  // No constant on the LHS.
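  // (Operand 1 is the first source, which here is a plain copy rather than a
  // G_CONSTANT.)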
  EXPECT_FALSE(mi_match(MIBAdd.getReg(1), *MRI, m_SpecificICst(42)));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstantSplat) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
  MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);

  EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(42)));
  EXPECT_FALSE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(43)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstSplat(42)));

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});

  MachineInstrBuilder AddSplat =
      B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
  EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(42)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(43)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstSplat(42)));

  MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(Add.getReg(2), *MRI, m_SpecificICstSplat(42)));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstantOrSplat) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
  MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);

  EXPECT_TRUE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(42)));
  EXPECT_FALSE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(43)));
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstOrSplat(42)));

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});

  MachineInstrBuilder AddSplat =
      B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
  EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(43)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstOrSplat(42)));

  MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_TRUE(mi_match(Add.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
}

TEST_F(AArch64GISelMITest, MatchZeroInt) {
  setUp();
  if (!TM)
    return;
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  EXPECT_TRUE(mi_match(Zero.getReg(0), *MRI, m_ZeroInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_ZeroInt()));
}

TEST_F(AArch64GISelMITest, MatchAllOnesInt) {
  setUp();
  if (!TM)
    return;
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  EXPECT_TRUE(mi_match(AllOnes.getReg(0), *MRI, m_AllOnesInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_AllOnesInt()));
}

TEST_F(AArch64GISelMITest, MatchFPOrIntConst) {
  setUp();
  if (!TM)
    return;

  Register IntOne = B.buildConstant(LLT::scalar(64), 1).getReg(0);
  Register FPOne = B.buildFConstant(LLT::scalar(64), 1.0).getReg(0);
  Optional<ValueAndVReg> ValReg;
  Optional<FPValueAndVReg> FValReg;
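  // (m_GCst matches only integer G_CONSTANTs, while m_GFCst matches only
  // G_FCONSTANTs; the two matchers do not overlap.)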
  EXPECT_TRUE(mi_match(IntOne, *MRI, m_GCst(ValReg)));
  EXPECT_EQ(IntOne, ValReg->VReg);
  EXPECT_FALSE(mi_match(IntOne, *MRI, m_GFCst(FValReg)));

  EXPECT_FALSE(mi_match(FPOne, *MRI, m_GCst(ValReg)));
  EXPECT_TRUE(mi_match(FPOne, *MRI, m_GFCst(FValReg)));
  EXPECT_EQ(FPOne, FValReg->VReg);
}

TEST_F(AArch64GISelMITest, MatchConstantSplat) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, 64);

  Register FPOne = B.buildFConstant(s64, 1.0).getReg(0);
  Register FPZero = B.buildFConstant(s64, 0.0).getReg(0);
  Register Undef = B.buildUndef(s64).getReg(0);
  Optional<FPValueAndVReg> FValReg;

  // GFCstOrSplatGFCstMatch accepts undef elements as part of a splat. Undefs
  // often come from padding a vector to a legal size (e.g. widening v3s64 to
  // v4s64), after which the added elements are ignored.

  EXPECT_TRUE(mi_match(FPZero, *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  EXPECT_FALSE(mi_match(Undef, *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto ZeroSplat = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPZero});
  EXPECT_TRUE(
      mi_match(ZeroSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  auto ZeroUndef = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Undef});
  EXPECT_TRUE(
      mi_match(ZeroUndef.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  // A vector of all undefs is not a constant splat.
  auto UndefSplat = B.buildBuildVector(v4s64, {Undef, Undef, Undef, Undef});
  EXPECT_FALSE(
      mi_match(UndefSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto ZeroOne = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPOne});
  EXPECT_FALSE(
      mi_match(ZeroOne.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
  EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));

  auto Mixed = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Copies[0]});
  EXPECT_FALSE(
      mi_match(Mixed.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
}

TEST_F(AArch64GISelMITest, MatchNeg) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  auto NegInst = B.buildSub(s64, Zero, Copies[0]);
  Register NegatedReg;

  // Match: %neg = G_SUB 0, %Reg
  EXPECT_TRUE(mi_match(NegInst.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);

  // Don't match: %sub = G_SUB %Reg, 0
  auto NotNegInst1 = B.buildSub(s64, Copies[0], Zero);
  EXPECT_FALSE(mi_match(NotNegInst1.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Don't match: %sub = G_SUB 42, %Reg
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto NotNegInst2 = B.buildSub(s64, FortyTwo, Copies[0]);
  EXPECT_FALSE(mi_match(NotNegInst2.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Complex testcase.
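  // (Matching m_Neg against the add's second source operand still recognizes
  // the negation, since mi_match walks to the G_SUB that defines that vreg.)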
  // %sub = G_SUB 0, %negated_reg
  // %add = G_ADD %x, %sub
  auto AddInst = B.buildAdd(s64, Copies[1], NegInst);
  NegatedReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchNot) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  auto NotInst1 = B.buildXor(s64, Copies[0], AllOnes);
  Register NotReg;

  // Match: G_XOR %NotReg, -1
  EXPECT_TRUE(mi_match(NotInst1.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);

  // Match: G_XOR -1, %NotReg
  auto NotInst2 = B.buildXor(s64, AllOnes, Copies[1]);
  EXPECT_TRUE(mi_match(NotInst2.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[1]);

  // Don't match: G_XOR %NotReg, 42
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto WrongCst = B.buildXor(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(WrongCst.getReg(0), *MRI, m_Not(m_Reg(NotReg))));

  // Complex testcase.
  // %xor = G_XOR %NotReg, -1
  // %add = G_ADD %x, %xor
  auto AddInst = B.buildAdd(s64, Copies[1], NotInst1);
  NotReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchSpecificReg) {
  setUp();
  if (!TM)
    return;
  auto Cst1 = B.buildConstant(LLT::scalar(64), 42);
  auto Cst2 = B.buildConstant(LLT::scalar(64), 314);
  Register Reg = Cst1.getReg(0);
  // Basic case: Same register twice.
  EXPECT_TRUE(mi_match(Reg, *MRI, m_SpecificReg(Reg)));
  // Basic case: Two explicitly different registers.
  EXPECT_FALSE(mi_match(Reg, *MRI, m_SpecificReg(Cst2.getReg(0))));
  // Check that we can tell that an instruction uses a specific register.
  auto Add = B.buildAdd(LLT::scalar(64), Cst1, Cst2);
  EXPECT_TRUE(
      mi_match(Add.getReg(0), *MRI, m_GAdd(m_SpecificReg(Reg), m_Reg())));
}

} // namespace

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  initLLVM();
  return RUN_ALL_TESTS();
}