xref: /llvm-project/llvm/test/CodeGen/X86/apx/kmov-isel.ll (revision 511ba45a47d6f9e48ad364181830c9fb974135b2)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw,+egpr --show-mc-encoding | FileCheck --check-prefix=AVX512 %s

; Check that bitcasting a <16 x i1> compare result to i16 and storing it
; selects a mask-register store (kmovw %k0, (%rdi)) rather than a GPR spill.
define void @bitcast_16i8_store(ptr %p, <16 x i8> %a0) {
; AVX512-LABEL: bitcast_16i8_store:
; AVX512:       # %bb.0:
; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT:    vpmovb2m %zmm0, %k0 # encoding: [0x62,0xf2,0x7e,0x48,0x29,0xc0]
; AVX512-NEXT:    kmovw %k0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x91,0x07]
; AVX512-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512-NEXT:    retq # encoding: [0xc3]
  %a1 = icmp slt <16 x i8> %a0, zeroinitializer
  %a2 = bitcast <16 x i1> %a1 to i16
  store i16 %a2, ptr %p
  ret void
}
; Check that bitcasting a <32 x i1> compare result to i32 and storing it
; selects a mask-register store (kmovd %k0, (%rdi)).
define void @bitcast_32i8_store(ptr %p, <32 x i8> %a0) {
; AVX512-LABEL: bitcast_32i8_store:
; AVX512:       # %bb.0:
; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT:    vpmovb2m %zmm0, %k0 # encoding: [0x62,0xf2,0x7e,0x48,0x29,0xc0]
; AVX512-NEXT:    kmovd %k0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x91,0x07]
; AVX512-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512-NEXT:    retq # encoding: [0xc3]
  %a1 = icmp slt <32 x i8> %a0, zeroinitializer
  %a2 = bitcast <32 x i1> %a1 to i32
  store i32 %a2, ptr %p
  ret void
}
; Check that bitcasting a <64 x i1> compare result to i64 and storing it
; selects a mask-register store (kmovq %k0, (%rdi)).
define void @bitcast_64i8_store(ptr %p, <64 x i8> %a0) {
; AVX512-LABEL: bitcast_64i8_store:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmovb2m %zmm0, %k0 # encoding: [0x62,0xf2,0x7e,0x48,0x29,0xc0]
; AVX512-NEXT:    kmovq %k0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf8,0x91,0x07]
; AVX512-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512-NEXT:    retq # encoding: [0xc3]
  %a1 = icmp slt <64 x i8> %a0, zeroinitializer
  %a2 = bitcast <64 x i1> %a1 to i64
  store i64 %a2, ptr %p
  ret void
}
; Check that loading an i16 and bitcasting it to a <16 x i1> select mask
; uses a mask-register load (kmovw (%rdi), %k1).
define <16 x i1> @bitcast_16i8_load(ptr %p, <16 x i1> %a, <16 x i1> %b) {
; AVX512-LABEL: bitcast_16i8_load:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsllw $7, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x71,0xf1,0x07]
; AVX512-NEXT:    vpmovb2m %zmm1, %k0 # encoding: [0x62,0xf2,0x7e,0x48,0x29,0xc1]
; AVX512-NEXT:    vpsllw $7, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x07]
; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xef,0xc9]
; AVX512-NEXT:    kmovw (%rdi), %k1 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x90,0x0f]
; AVX512-NEXT:    vpcmpgtb %zmm0, %zmm1, %k2 {%k1} # encoding: [0x62,0xf1,0x75,0x49,0x64,0xd0]
; AVX512-NEXT:    kandnw %k0, %k1, %k0 # encoding: [0xc5,0xf4,0x42,0xc0]
; AVX512-NEXT:    korw %k0, %k2, %k0 # encoding: [0xc5,0xec,0x45,0xc0]
; AVX512-NEXT:    vpmovm2b %k0, %zmm0 # encoding: [0x62,0xf2,0x7e,0x48,0x28,0xc0]
; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512-NEXT:    retq # encoding: [0xc3]
  %mask = load i16, ptr %p
  %vmask = bitcast i16 %mask to <16 x i1>
  %res = select <16 x i1> %vmask, <16 x i1> %a, <16 x i1> %b
  ret <16 x i1> %res
}
; Check that loading an i32 and bitcasting it to a <32 x i1> select mask
; uses a mask-register load (kmovd (%rdi), %k1).
define <32 x i1> @bitcast_32i8_load(ptr %p, <32 x i1> %a, <32 x i1> %b) {
; AVX512-LABEL: bitcast_32i8_load:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsllw $7, %ymm1, %ymm1 # encoding: [0xc5,0xf5,0x71,0xf1,0x07]
; AVX512-NEXT:    vpmovb2m %zmm1, %k0 # encoding: [0x62,0xf2,0x7e,0x48,0x29,0xc1]
; AVX512-NEXT:    vpsllw $7, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x71,0xf0,0x07]
; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xef,0xc9]
; AVX512-NEXT:    kmovd (%rdi), %k1 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x90,0x0f]
; AVX512-NEXT:    vpcmpgtb %zmm0, %zmm1, %k2 {%k1} # encoding: [0x62,0xf1,0x75,0x49,0x64,0xd0]
; AVX512-NEXT:    kandnd %k0, %k1, %k0 # encoding: [0xc4,0xe1,0xf5,0x42,0xc0]
; AVX512-NEXT:    kord %k0, %k2, %k0 # encoding: [0xc4,0xe1,0xed,0x45,0xc0]
; AVX512-NEXT:    vpmovm2b %k0, %zmm0 # encoding: [0x62,0xf2,0x7e,0x48,0x28,0xc0]
; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT:    retq # encoding: [0xc3]
  %mask = load i32, ptr %p
  %vmask = bitcast i32 %mask to <32 x i1>
  %res = select <32 x i1> %vmask, <32 x i1> %a, <32 x i1> %b
  ret <32 x i1> %res
}
; Check that loading an i64 and bitcasting it to a <64 x i1> select mask
; uses a mask-register load (kmovq (%rdi), %k1).
define <64 x i1> @bitcast_64i8_load(ptr %p, <64 x i1> %a, <64 x i1> %b) {
; AVX512-LABEL: bitcast_64i8_load:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsllw $7, %zmm1, %zmm1 # encoding: [0x62,0xf1,0x75,0x48,0x71,0xf1,0x07]
; AVX512-NEXT:    vpmovb2m %zmm1, %k0 # encoding: [0x62,0xf2,0x7e,0x48,0x29,0xc1]
; AVX512-NEXT:    vpsllw $7, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0x71,0xf0,0x07]
; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xef,0xc9]
; AVX512-NEXT:    kmovq (%rdi), %k1 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf8,0x90,0x0f]
; AVX512-NEXT:    vpcmpgtb %zmm0, %zmm1, %k2 {%k1} # encoding: [0x62,0xf1,0x75,0x49,0x64,0xd0]
; AVX512-NEXT:    kandnq %k0, %k1, %k0 # encoding: [0xc4,0xe1,0xf4,0x42,0xc0]
; AVX512-NEXT:    korq %k0, %k2, %k0 # encoding: [0xc4,0xe1,0xec,0x45,0xc0]
; AVX512-NEXT:    vpmovm2b %k0, %zmm0 # encoding: [0x62,0xf2,0x7e,0x48,0x28,0xc0]
; AVX512-NEXT:    retq # encoding: [0xc3]
  %mask = load i64, ptr %p
  %vmask = bitcast i64 %mask to <64 x i1>
  %res = select <64 x i1> %vmask, <64 x i1> %a, <64 x i1> %b
  ret <64 x i1> %res
}