; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
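; Check that bitwise logic on fixed-length <N x i1> vectors selects the RVV
; mask-register instructions (vmand.mm, vmor.mm, vmxor.mm, and friends). Both
; RUN lines share the CHECK prefix because riscv32 and riscv64 generate
; identical code here.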
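; AND of two v8i1 masks: both operands are loaded with vlm.v and combined with
; a single vmand.mm.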
define void @and_v8i1(ptr %x, ptr %y) {
; CHECK-LABEL: and_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vlm.v v9, (a1)
; CHECK-NEXT:    vmand.mm v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <8 x i1>, ptr %x
  %b = load <8 x i1>, ptr %y
  %c = and <8 x i1> %a, %b
  store <8 x i1> %c, ptr %x
  ret void
}
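; OR of two v16i1 masks selects vmor.mm; an AVL of 16 still fits the vsetivli
; immediate.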
define void @or_v16i1(ptr %x, ptr %y) {
; CHECK-LABEL: or_v16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vlm.v v9, (a1)
; CHECK-NEXT:    vmor.mm v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <16 x i1>, ptr %x
  %b = load <16 x i1>, ptr %y
  %c = or <16 x i1> %a, %b
  store <16 x i1> %c, ptr %x
  ret void
}
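; An AVL of 32 no longer fits the 5-bit vsetivli immediate (max 31), so it is
; materialized with li and passed to vsetvli instead.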
define void @xor_v32i1(ptr %x, ptr %y) {
; CHECK-LABEL: xor_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vlm.v v9, (a1)
; CHECK-NEXT:    vmxor.mm v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <32 x i1>, ptr %x
  %b = load <32 x i1>, ptr %y
  %c = xor <32 x i1> %a, %b
  store <32 x i1> %c, ptr %x
  ret void
}
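; xor with an all-ones vector is selected as the unary vmnot.m; the load of %y
; is dead, so only one vlm.v is emitted.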
define void @not_v64i1(ptr %x, ptr %y) {
; CHECK-LABEL: not_v64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vmnot.m v8, v8
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %b = load <64 x i1>, ptr %y
  %c = xor <64 x i1> %a, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
  store <64 x i1> %c, ptr %x
  ret void
}
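; b & ~a matches vmandn.mm, which complements its second source operand
; (v8 holds %a here).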
define void @andnot_v8i1(ptr %x, ptr %y) {
; CHECK-LABEL: andnot_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vlm.v v9, (a1)
; CHECK-NEXT:    vmandn.mm v8, v9, v8
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <8 x i1>, ptr %x
  %b = load <8 x i1>, ptr %y
  %c = xor <8 x i1> %a, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
  %d = and <8 x i1> %b, %c
  store <8 x i1> %d, ptr %x
  ret void
}
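; b | ~a matches vmorn.mm, the OR-with-complement form.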
define void @ornot_v16i1(ptr %x, ptr %y) {
; CHECK-LABEL: ornot_v16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vlm.v v9, (a1)
; CHECK-NEXT:    vmorn.mm v8, v9, v8
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <16 x i1>, ptr %x
  %b = load <16 x i1>, ptr %y
  %c = xor <16 x i1> %a, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
  %d = or <16 x i1> %b, %c
  store <16 x i1> %d, ptr %x
  ret void
}
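; b ^ ~a is equivalent to ~(a ^ b), so this selects vmxnor.mm rather than a
; separate NOT.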
define void @xornot_v32i1(ptr %x, ptr %y) {
; CHECK-LABEL: xornot_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vlm.v v9, (a1)
; CHECK-NEXT:    vmxnor.mm v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <32 x i1>, ptr %x
  %b = load <32 x i1>, ptr %y
  %c = xor <32 x i1> %a, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
  %d = xor <32 x i1> %b, %c
  store <32 x i1> %d, ptr %x
  ret void
}
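; ~(a & b) selects vmnand.mm.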
define void @nand_v8i1(ptr %x, ptr %y) {
; CHECK-LABEL: nand_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vlm.v v9, (a1)
; CHECK-NEXT:    vmnand.mm v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <8 x i1>, ptr %x
  %b = load <8 x i1>, ptr %y
  %c = and <8 x i1> %a, %b
  %d = xor <8 x i1> %c, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
  store <8 x i1> %d, ptr %x
  ret void
}
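; ~(a | b) selects vmnor.mm.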
define void @nor_v16i1(ptr %x, ptr %y) {
; CHECK-LABEL: nor_v16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vlm.v v9, (a1)
; CHECK-NEXT:    vmnor.mm v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <16 x i1>, ptr %x
  %b = load <16 x i1>, ptr %y
  %c = or <16 x i1> %a, %b
  %d = xor <16 x i1> %c, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
  store <16 x i1> %d, ptr %x
  ret void
}
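; ~(a ^ b) selects vmxnor.mm, the same instruction as the xornot case above.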
define void @xnor_v32i1(ptr %x, ptr %y) {
; CHECK-LABEL: xnor_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vlm.v v9, (a1)
; CHECK-NEXT:    vmxnor.mm v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <32 x i1>, ptr %x
  %b = load <32 x i1>, ptr %y
  %c = xor <32 x i1> %a, %b
  %d = xor <32 x i1> %c, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
  store <32 x i1> %d, ptr %x
  ret void
}