xref: /llvm-project/llvm/test/CodeGen/X86/combine-testm-and.ll (revision 2f448bf509432c1a19ec46ab8cbc7353c03c6280)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
;RUN: llc -mtriple=x86_64-apple-darwin -mcpu=skx < %s | FileCheck %s

; ptestm(and(b, a), and(b, a)) with an all-ones mask should fold the explicit
; AND away and emit a single vptestmq on the two original operands.
define i32 @combineTESTM_AND_1(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: combineTESTM_AND_1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vptestmq %zmm0, %zmm1, %k0
; CHECK-NEXT:    kmovb %k0, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %and.i = and <8 x i64> %b, %a
  %test.i = tail call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %and.i, <8 x i64> %and.i, i8 -1)
  %conv = zext i8 %test.i to i32
  ret i32 %conv
}

; Same fold as above, but with a runtime write-mask: the AND still folds into
; vptestmq, and the i8 %mask is applied afterwards with a scalar andb.
define i32 @combineTESTM_AND_2(<8 x i64> %a, <8 x i64> %b , i8 %mask) {
; CHECK-LABEL: combineTESTM_AND_2:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vptestmq %zmm0, %zmm1, %k0
; CHECK-NEXT:    kmovd %k0, %eax
; CHECK-NEXT:    andb %dil, %al
; CHECK-NEXT:    movzbl %al, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %and.i = and <8 x i64> %b, %a
  %test.i = tail call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %and.i, <8 x i64> %and.i, i8 %mask)
  %conv = zext i8 %test.i to i32
  ret i32 %conv
}

; One AND operand comes from memory: the fold should keep the load folded into
; vptestmq as a memory operand, (%rdi), rather than emitting a separate load.
define i32 @combineTESTM_AND_mask_3(<8 x i64> %a, ptr %bptr , i8 %mask) {
; CHECK-LABEL: combineTESTM_AND_mask_3:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vptestmq (%rdi), %zmm0, %k0
; CHECK-NEXT:    kmovd %k0, %eax
; CHECK-NEXT:    andb %sil, %al
; CHECK-NEXT:    movzbl %al, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %b = load <8 x i64>, ptr %bptr
  %and.i = and <8 x i64> %a, %b
  %test.i = tail call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %and.i, <8 x i64> %and.i, i8 %mask)
  %conv = zext i8 %test.i to i32
  ret i32 %conv
}

; Same as mask_3 but with the AND operands commuted (and %b, %a); the fold is
; commutative, so codegen must be identical, with the load still folded.
define i32 @combineTESTM_AND_mask_4(<8 x i64> %a, ptr %bptr , i8 %mask) {
; CHECK-LABEL: combineTESTM_AND_mask_4:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vptestmq (%rdi), %zmm0, %k0
; CHECK-NEXT:    kmovd %k0, %eax
; CHECK-NEXT:    andb %sil, %al
; CHECK-NEXT:    movzbl %al, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %b = load <8 x i64>, ptr %bptr
  %and.i = and <8 x i64> %b, %a
  %test.i = tail call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %and.i, <8 x i64> %and.i, i8 %mask)
  %conv = zext i8 %test.i to i32
  ret i32 %conv
}

; AVX-512 PTESTM intrinsic: per-lane (a & b) != 0 under the i8 mask, returned
; as an 8-bit predicate.
declare i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64>, <8 x i64>, i8)