xref: /llvm-project/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll (revision 9122c5235ec85ce0c0ad337e862b006e7b349d84)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
3; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64I
4; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
5; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBKB
6; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
7; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBKB
8
; (~b) & a on i32: base RV64I lowers to not+and; Zbb/Zbkb folds it into a
; single ANDN. CHECK lines below are autogenerated -- regenerate with
; update_llc_test_checks.py rather than editing by hand.
9define signext i32 @andn_i32(i32 signext %a, i32 signext %b) nounwind {
10; RV64I-LABEL: andn_i32:
11; RV64I:       # %bb.0:
12; RV64I-NEXT:    not a1, a1
13; RV64I-NEXT:    and a0, a1, a0
14; RV64I-NEXT:    ret
15;
16; RV64ZBB-ZBKB-LABEL: andn_i32:
17; RV64ZBB-ZBKB:       # %bb.0:
18; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
19; RV64ZBB-ZBKB-NEXT:    ret
20  %neg = xor i32 %b, -1
21  %and = and i32 %neg, %a
22  ret i32 %and
23}
24
; i64 version of andn_i32: (~b) & a selects a single ANDN under Zbb/Zbkb.
25define i64 @andn_i64(i64 %a, i64 %b) nounwind {
26; RV64I-LABEL: andn_i64:
27; RV64I:       # %bb.0:
28; RV64I-NEXT:    not a1, a1
29; RV64I-NEXT:    and a0, a1, a0
30; RV64I-NEXT:    ret
31;
32; RV64ZBB-ZBKB-LABEL: andn_i64:
33; RV64ZBB-ZBKB:       # %bb.0:
34; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
35; RV64ZBB-ZBKB-NEXT:    ret
36  %neg = xor i64 %b, -1
37  %and = and i64 %neg, %a
38  ret i64 %and
39}
40
; (~b) | a on i32: base RV64I uses not+or; Zbb/Zbkb folds to a single ORN.
41define signext i32 @orn_i32(i32 signext %a, i32 signext %b) nounwind {
42; RV64I-LABEL: orn_i32:
43; RV64I:       # %bb.0:
44; RV64I-NEXT:    not a1, a1
45; RV64I-NEXT:    or a0, a1, a0
46; RV64I-NEXT:    ret
47;
48; RV64ZBB-ZBKB-LABEL: orn_i32:
49; RV64ZBB-ZBKB:       # %bb.0:
50; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
51; RV64ZBB-ZBKB-NEXT:    ret
52  %neg = xor i32 %b, -1
53  %or = or i32 %neg, %a
54  ret i32 %or
55}
56
; i64 version of orn_i32: (~b) | a selects a single ORN under Zbb/Zbkb.
57define i64 @orn_i64(i64 %a, i64 %b) nounwind {
58; RV64I-LABEL: orn_i64:
59; RV64I:       # %bb.0:
60; RV64I-NEXT:    not a1, a1
61; RV64I-NEXT:    or a0, a1, a0
62; RV64I-NEXT:    ret
63;
64; RV64ZBB-ZBKB-LABEL: orn_i64:
65; RV64ZBB-ZBKB:       # %bb.0:
66; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
67; RV64ZBB-ZBKB-NEXT:    ret
68  %neg = xor i64 %b, -1
69  %or = or i64 %neg, %a
70  ret i64 %or
71}
72
; (~a) ^ b on i32: base RV64I uses xor+not; Zbb/Zbkb folds to a single XNOR.
73define signext i32 @xnor_i32(i32 signext %a, i32 signext %b) nounwind {
74; RV64I-LABEL: xnor_i32:
75; RV64I:       # %bb.0:
76; RV64I-NEXT:    xor a0, a0, a1
77; RV64I-NEXT:    not a0, a0
78; RV64I-NEXT:    ret
79;
80; RV64ZBB-ZBKB-LABEL: xnor_i32:
81; RV64ZBB-ZBKB:       # %bb.0:
82; RV64ZBB-ZBKB-NEXT:    xnor a0, a0, a1
83; RV64ZBB-ZBKB-NEXT:    ret
84  %neg = xor i32 %a, -1
85  %xor = xor i32 %neg, %b
86  ret i32 %xor
87}
88
; i64 version of xnor_i32: (~a) ^ b selects a single XNOR under Zbb/Zbkb.
89define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
90; RV64I-LABEL: xnor_i64:
91; RV64I:       # %bb.0:
92; RV64I-NEXT:    xor a0, a0, a1
93; RV64I-NEXT:    not a0, a0
94; RV64I-NEXT:    ret
95;
96; RV64ZBB-ZBKB-LABEL: xnor_i64:
97; RV64ZBB-ZBKB:       # %bb.0:
98; RV64ZBB-ZBKB-NEXT:    xnor a0, a0, a1
99; RV64ZBB-ZBKB-NEXT:    ret
100  %neg = xor i64 %a, -1
101  %xor = xor i64 %neg, %b
102  ret i64 %xor
103}
104
105declare i32 @llvm.fshl.i32(i32, i32, i32)
106
; fshl(a, a, b) on i32 is a variable rotate-left: Zbb/Zbkb selects ROLW;
; base RV64I expands to sllw + negated-amount srlw + or.
107define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind {
108; RV64I-LABEL: rol_i32:
109; RV64I:       # %bb.0:
110; RV64I-NEXT:    sllw a2, a0, a1
111; RV64I-NEXT:    negw a1, a1
112; RV64I-NEXT:    srlw a0, a0, a1
113; RV64I-NEXT:    or a0, a2, a0
114; RV64I-NEXT:    ret
115;
116; RV64ZBB-ZBKB-LABEL: rol_i32:
117; RV64ZBB-ZBKB:       # %bb.0:
118; RV64ZBB-ZBKB-NEXT:    rolw a0, a0, a1
119; RV64ZBB-ZBKB-NEXT:    ret
120  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
121  ret i32 %1
122}
123
124; Similar to rol_i32, but doesn't sign extend the result.
; Same rotate as rol_i32 but the result is stored rather than returned, so
; no sign extension of the rotated value is required.
125define void @rol_i32_nosext(i32 signext %a, i32 signext %b, ptr %x) nounwind {
126; RV64I-LABEL: rol_i32_nosext:
127; RV64I:       # %bb.0:
128; RV64I-NEXT:    sllw a3, a0, a1
129; RV64I-NEXT:    negw a1, a1
130; RV64I-NEXT:    srlw a0, a0, a1
131; RV64I-NEXT:    or a0, a3, a0
132; RV64I-NEXT:    sw a0, 0(a2)
133; RV64I-NEXT:    ret
134;
135; RV64ZBB-ZBKB-LABEL: rol_i32_nosext:
136; RV64ZBB-ZBKB:       # %bb.0:
137; RV64ZBB-ZBKB-NEXT:    rolw a0, a0, a1
138; RV64ZBB-ZBKB-NEXT:    sw a0, 0(a2)
139; RV64ZBB-ZBKB-NEXT:    ret
140  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
141  store i32 %1, ptr %x
142  ret void
143}
144
; Rotate-left of the constant -2 by a variable amount: the constant is
; materialized with li and ROLW is still formed under Zbb/Zbkb.
145define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind {
146; RV64I-LABEL: rol_i32_neg_constant_rhs:
147; RV64I:       # %bb.0:
148; RV64I-NEXT:    li a1, -2
149; RV64I-NEXT:    negw a2, a0
150; RV64I-NEXT:    sllw a0, a1, a0
151; RV64I-NEXT:    srlw a1, a1, a2
152; RV64I-NEXT:    or a0, a0, a1
153; RV64I-NEXT:    ret
154;
155; RV64ZBB-ZBKB-LABEL: rol_i32_neg_constant_rhs:
156; RV64ZBB-ZBKB:       # %bb.0:
157; RV64ZBB-ZBKB-NEXT:    li a1, -2
158; RV64ZBB-ZBKB-NEXT:    rolw a0, a1, a0
159; RV64ZBB-ZBKB-NEXT:    ret
160  %1 = tail call i32 @llvm.fshl.i32(i32 -2, i32 -2, i32 %a)
161  ret i32 %1
162}
163
164declare i64 @llvm.fshl.i64(i64, i64, i64)
165
; fshl(a, a, b) on i64 is a 64-bit rotate-left: ROL under Zbb/Zbkb,
; sll/negw+srl/or expansion otherwise.
166define i64 @rol_i64(i64 %a, i64 %b) nounwind {
167; RV64I-LABEL: rol_i64:
168; RV64I:       # %bb.0:
169; RV64I-NEXT:    sll a2, a0, a1
170; RV64I-NEXT:    negw a1, a1
171; RV64I-NEXT:    srl a0, a0, a1
172; RV64I-NEXT:    or a0, a2, a0
173; RV64I-NEXT:    ret
174;
175; RV64ZBB-ZBKB-LABEL: rol_i64:
176; RV64ZBB-ZBKB:       # %bb.0:
177; RV64ZBB-ZBKB-NEXT:    rol a0, a0, a1
178; RV64ZBB-ZBKB-NEXT:    ret
179  %or = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %b)
180  ret i64 %or
181}
182
183declare i32 @llvm.fshr.i32(i32, i32, i32)
184
; fshr(a, a, b) on i32 is a variable rotate-right: RORW under Zbb/Zbkb;
; srlw + negated-amount sllw + or otherwise.
185define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind {
186; RV64I-LABEL: ror_i32:
187; RV64I:       # %bb.0:
188; RV64I-NEXT:    srlw a2, a0, a1
189; RV64I-NEXT:    negw a1, a1
190; RV64I-NEXT:    sllw a0, a0, a1
191; RV64I-NEXT:    or a0, a2, a0
192; RV64I-NEXT:    ret
193;
194; RV64ZBB-ZBKB-LABEL: ror_i32:
195; RV64ZBB-ZBKB:       # %bb.0:
196; RV64ZBB-ZBKB-NEXT:    rorw a0, a0, a1
197; RV64ZBB-ZBKB-NEXT:    ret
198  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
199  ret i32 %1
200}
201
202; Similar to ror_i32, but doesn't sign extend the result.
; Same rotate as ror_i32 but the result is stored rather than returned, so
; no sign extension of the rotated value is required.
203define void @ror_i32_nosext(i32 signext %a, i32 signext %b, ptr %x) nounwind {
204; RV64I-LABEL: ror_i32_nosext:
205; RV64I:       # %bb.0:
206; RV64I-NEXT:    srlw a3, a0, a1
207; RV64I-NEXT:    negw a1, a1
208; RV64I-NEXT:    sllw a0, a0, a1
209; RV64I-NEXT:    or a0, a3, a0
210; RV64I-NEXT:    sw a0, 0(a2)
211; RV64I-NEXT:    ret
212;
213; RV64ZBB-ZBKB-LABEL: ror_i32_nosext:
214; RV64ZBB-ZBKB:       # %bb.0:
215; RV64ZBB-ZBKB-NEXT:    rorw a0, a0, a1
216; RV64ZBB-ZBKB-NEXT:    sw a0, 0(a2)
217; RV64ZBB-ZBKB-NEXT:    ret
218  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
219  store i32 %1, ptr %x
220  ret void
221}
222
; Rotate-right of the constant -2 by a variable amount: the constant is
; materialized with li and RORW is still formed under Zbb/Zbkb.
223define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind {
224; RV64I-LABEL: ror_i32_neg_constant_rhs:
225; RV64I:       # %bb.0:
226; RV64I-NEXT:    li a1, -2
227; RV64I-NEXT:    negw a2, a0
228; RV64I-NEXT:    srlw a0, a1, a0
229; RV64I-NEXT:    sllw a1, a1, a2
230; RV64I-NEXT:    or a0, a0, a1
231; RV64I-NEXT:    ret
232;
233; RV64ZBB-ZBKB-LABEL: ror_i32_neg_constant_rhs:
234; RV64ZBB-ZBKB:       # %bb.0:
235; RV64ZBB-ZBKB-NEXT:    li a1, -2
236; RV64ZBB-ZBKB-NEXT:    rorw a0, a1, a0
237; RV64ZBB-ZBKB-NEXT:    ret
238  %1 = tail call i32 @llvm.fshr.i32(i32 -2, i32 -2, i32 %a)
239  ret i32 %1
240}
241
242declare i64 @llvm.fshr.i64(i64, i64, i64)
243
; fshr(a, a, b) on i64 is a 64-bit rotate-right: ROR under Zbb/Zbkb,
; srl/negw+sll/or expansion otherwise.
244define i64 @ror_i64(i64 %a, i64 %b) nounwind {
245; RV64I-LABEL: ror_i64:
246; RV64I:       # %bb.0:
247; RV64I-NEXT:    srl a2, a0, a1
248; RV64I-NEXT:    negw a1, a1
249; RV64I-NEXT:    sll a0, a0, a1
250; RV64I-NEXT:    or a0, a2, a0
251; RV64I-NEXT:    ret
252;
253; RV64ZBB-ZBKB-LABEL: ror_i64:
254; RV64ZBB-ZBKB:       # %bb.0:
255; RV64ZBB-ZBKB-NEXT:    ror a0, a0, a1
256; RV64ZBB-ZBKB-NEXT:    ret
257  %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %b)
258  ret i64 %or
259}
260
; fshl(a, a, 31) is a rotate-left by 31, i.e. rotate-right by 1: Zbb/Zbkb
; selects "roriw a0, a0, 1"; base RV64I uses a srliw/slliw/or triple.
261define signext i32 @rori_i32_fshl(i32 signext %a) nounwind {
262; RV64I-LABEL: rori_i32_fshl:
263; RV64I:       # %bb.0:
264; RV64I-NEXT:    srliw a1, a0, 1
265; RV64I-NEXT:    slliw a0, a0, 31
266; RV64I-NEXT:    or a0, a0, a1
267; RV64I-NEXT:    ret
268;
269; RV64ZBB-ZBKB-LABEL: rori_i32_fshl:
270; RV64ZBB-ZBKB:       # %bb.0:
271; RV64ZBB-ZBKB-NEXT:    roriw a0, a0, 1
272; RV64ZBB-ZBKB-NEXT:    ret
273  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
274  ret i32 %1
275}
276
277; Similar to rori_i32_fshl, but doesn't sign extend the result.
; Store-result variant of rori_i32_fshl: since the value feeds a sw, the
; RV64I expansion may use slli instead of slliw (upper bits are dead).
278define void @rori_i32_fshl_nosext(i32 signext %a, ptr %x) nounwind {
279; RV64I-LABEL: rori_i32_fshl_nosext:
280; RV64I:       # %bb.0:
281; RV64I-NEXT:    srliw a2, a0, 1
282; RV64I-NEXT:    slli a0, a0, 31
283; RV64I-NEXT:    or a0, a0, a2
284; RV64I-NEXT:    sw a0, 0(a1)
285; RV64I-NEXT:    ret
286;
287; RV64ZBB-ZBKB-LABEL: rori_i32_fshl_nosext:
288; RV64ZBB-ZBKB:       # %bb.0:
289; RV64ZBB-ZBKB-NEXT:    roriw a0, a0, 1
290; RV64ZBB-ZBKB-NEXT:    sw a0, 0(a1)
291; RV64ZBB-ZBKB-NEXT:    ret
292  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
293  store i32 %1, ptr %x
294  ret void
295}
296
; fshr(a, a, 31) is a rotate-right by 31: Zbb/Zbkb selects
; "roriw a0, a0, 31"; base RV64I uses a slliw/srliw/or triple.
297define signext i32 @rori_i32_fshr(i32 signext %a) nounwind {
298; RV64I-LABEL: rori_i32_fshr:
299; RV64I:       # %bb.0:
300; RV64I-NEXT:    slliw a1, a0, 1
301; RV64I-NEXT:    srliw a0, a0, 31
302; RV64I-NEXT:    or a0, a0, a1
303; RV64I-NEXT:    ret
304;
305; RV64ZBB-ZBKB-LABEL: rori_i32_fshr:
306; RV64ZBB-ZBKB:       # %bb.0:
307; RV64ZBB-ZBKB-NEXT:    roriw a0, a0, 31
308; RV64ZBB-ZBKB-NEXT:    ret
309  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
310  ret i32 %1
311}
312
313; Similar to rori_i32_fshr, but doesn't sign extend the result.
; Store-result variant of rori_i32_fshr: since the value feeds a sw, the
; RV64I expansion may use slli instead of slliw (upper bits are dead).
314define void @rori_i32_fshr_nosext(i32 signext %a, ptr %x) nounwind {
315; RV64I-LABEL: rori_i32_fshr_nosext:
316; RV64I:       # %bb.0:
317; RV64I-NEXT:    slli a2, a0, 1
318; RV64I-NEXT:    srliw a0, a0, 31
319; RV64I-NEXT:    or a0, a0, a2
320; RV64I-NEXT:    sw a0, 0(a1)
321; RV64I-NEXT:    ret
322;
323; RV64ZBB-ZBKB-LABEL: rori_i32_fshr_nosext:
324; RV64ZBB-ZBKB:       # %bb.0:
325; RV64ZBB-ZBKB-NEXT:    roriw a0, a0, 31
326; RV64ZBB-ZBKB-NEXT:    sw a0, 0(a1)
327; RV64ZBB-ZBKB-NEXT:    ret
328  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
329  store i32 %1, ptr %x
330  ret void
331}
332
333; This test is similar to the type legalized version of the fshl/fshr tests, but
334; instead of having the same input to both shifts it has different inputs. Make
335; sure we don't match it as a roriw.
; Negative test: shl/lshr of two DIFFERENT values or'd together must stay
; shift+or on every configuration (shared CHECK prefix) -- not a roriw.
336define signext i32 @not_rori_i32(i32 signext %x, i32 signext %y) nounwind {
337; CHECK-LABEL: not_rori_i32:
338; CHECK:       # %bb.0:
339; CHECK-NEXT:    slliw a0, a0, 31
340; CHECK-NEXT:    srliw a1, a1, 1
341; CHECK-NEXT:    or a0, a0, a1
342; CHECK-NEXT:    ret
343  %a = shl i32 %x, 31
344  %b = lshr i32 %y, 1
345  %c = or i32 %a, %b
346  ret i32 %c
347}
348
349; This is similar to the type legalized roriw pattern, but the and mask is more
350; than 32 bits so the lshr doesn't shift zeroes into the lower 32 bits. Make
351; sure we don't match it to roriw.
; Negative test (regression): the and-mask 0xFFFFFFFFFFFFFFFE is wider than
; 32 bits, so the lshr does not shift zeroes into the low 32 bits and the
; pattern must NOT be matched as roriw; all configurations share the same
; CHECK output. The extra use of %b keeps SimplifyDemandedBits from
; shrinking the mask.
352define i64 @roriw_bug(i64 %x) nounwind {
353; CHECK-LABEL: roriw_bug:
354; CHECK:       # %bb.0:
355; CHECK-NEXT:    slli a1, a0, 31
356; CHECK-NEXT:    andi a2, a0, -2
357; CHECK-NEXT:    srli a0, a0, 1
358; CHECK-NEXT:    or a0, a1, a0
359; CHECK-NEXT:    sext.w a0, a0
360; CHECK-NEXT:    xor a0, a2, a0
361; CHECK-NEXT:    ret
362  %a = shl i64 %x, 31
363  %b = and i64 %x, 18446744073709551614
364  %c = lshr i64 %b, 1
365  %d = or i64 %a, %c
366  %e = shl i64 %d, 32
367  %f = ashr i64 %e, 32
368  %g = xor i64 %b, %f ; to increase the use count on %b to disable SimplifyDemandedBits.
369  ret i64 %g
370}
371
; fshl(a, a, 63) on i64 is a rotate-left by 63, i.e. rotate-right by 1:
; Zbb/Zbkb selects "rori a0, a0, 1".
372define i64 @rori_i64_fshl(i64 %a) nounwind {
373; RV64I-LABEL: rori_i64_fshl:
374; RV64I:       # %bb.0:
375; RV64I-NEXT:    srli a1, a0, 1
376; RV64I-NEXT:    slli a0, a0, 63
377; RV64I-NEXT:    or a0, a0, a1
378; RV64I-NEXT:    ret
379;
380; RV64ZBB-ZBKB-LABEL: rori_i64_fshl:
381; RV64ZBB-ZBKB:       # %bb.0:
382; RV64ZBB-ZBKB-NEXT:    rori a0, a0, 1
383; RV64ZBB-ZBKB-NEXT:    ret
384  %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 63)
385  ret i64 %1
386}
387
; fshr(a, a, 63) on i64 is a rotate-right by 63: Zbb/Zbkb selects
; "rori a0, a0, 63".
388define i64 @rori_i64_fshr(i64 %a) nounwind {
389; RV64I-LABEL: rori_i64_fshr:
390; RV64I:       # %bb.0:
391; RV64I-NEXT:    slli a1, a0, 1
392; RV64I-NEXT:    srli a0, a0, 63
393; RV64I-NEXT:    or a0, a0, a1
394; RV64I-NEXT:    ret
395;
396; RV64ZBB-ZBKB-LABEL: rori_i64_fshr:
397; RV64ZBB-ZBKB:       # %bb.0:
398; RV64ZBB-ZBKB-NEXT:    rori a0, a0, 63
399; RV64ZBB-ZBKB-NEXT:    ret
400  %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 63)
401  ret i64 %1
402}
403
; ~(1 << x) on i32: Zbb/Zbkb rewrites it as a rotate-left of the constant
; -2 (li -2 + rolw), saving the separate not; base RV64I uses li 1 + sllw + not.
404define signext i32 @not_shl_one_i32(i32 signext %x) {
405; RV64I-LABEL: not_shl_one_i32:
406; RV64I:       # %bb.0:
407; RV64I-NEXT:    li a1, 1
408; RV64I-NEXT:    sllw a0, a1, a0
409; RV64I-NEXT:    not a0, a0
410; RV64I-NEXT:    ret
411;
412; RV64ZBB-ZBKB-LABEL: not_shl_one_i32:
413; RV64ZBB-ZBKB:       # %bb.0:
414; RV64ZBB-ZBKB-NEXT:    li a1, -2
415; RV64ZBB-ZBKB-NEXT:    rolw a0, a1, a0
416; RV64ZBB-ZBKB-NEXT:    ret
417  %1 = shl i32 1, %x
418  %2 = xor i32 %1, -1
419  ret i32 %2
420}
421
; i64 version of not_shl_one_i32: ~(1 << x) becomes li -2 + rol under
; Zbb/Zbkb.
422define i64 @not_shl_one_i64(i64 %x) {
423; RV64I-LABEL: not_shl_one_i64:
424; RV64I:       # %bb.0:
425; RV64I-NEXT:    li a1, 1
426; RV64I-NEXT:    sll a0, a1, a0
427; RV64I-NEXT:    not a0, a0
428; RV64I-NEXT:    ret
429;
430; RV64ZBB-ZBKB-LABEL: not_shl_one_i64:
431; RV64ZBB-ZBKB:       # %bb.0:
432; RV64ZBB-ZBKB-NEXT:    li a1, -2
433; RV64ZBB-ZBKB-NEXT:    rol a0, a1, a0
434; RV64ZBB-ZBKB-NEXT:    ret
435  %1 = shl i64 1, %x
436  %2 = xor i64 %1, -1
437  ret i64 %2
438}
439
; i8 logical shift right by 6: implemented as slli-by-56 then srli-by-62 so
; the shift itself performs the zero extension; same code on all configs.
440define i8 @srli_i8(i8 %a) nounwind {
441; CHECK-LABEL: srli_i8:
442; CHECK:       # %bb.0:
443; CHECK-NEXT:    slli a0, a0, 56
444; CHECK-NEXT:    srli a0, a0, 62
445; CHECK-NEXT:    ret
446  %1 = lshr i8 %a, 6
447  ret i8 %1
448}
449
450; We could use sext.b+srai, but slli+srai offers more opportunities for
451; compressed instructions.
; i8 arithmetic shift right by 5: slli-by-56 then srai-by-61 (sign extension
; folded into the shift pair); same code on all configs.
452define i8 @srai_i8(i8 %a) nounwind {
453; CHECK-LABEL: srai_i8:
454; CHECK:       # %bb.0:
455; CHECK-NEXT:    slli a0, a0, 56
456; CHECK-NEXT:    srai a0, a0, 61
457; CHECK-NEXT:    ret
458  %1 = ashr i8 %a, 5
459  ret i8 %1
460}
461
462; We could use zext.h+srli, but slli+srli offers more opportunities for
463; compressed instructions.
; i16 logical shift right by 6: slli-by-48 then srli-by-54 performs the zero
; extension in the shift pair; same code on all configs.
464define i16 @srli_i16(i16 %a) nounwind {
465; CHECK-LABEL: srli_i16:
466; CHECK:       # %bb.0:
467; CHECK-NEXT:    slli a0, a0, 48
468; CHECK-NEXT:    srli a0, a0, 54
469; CHECK-NEXT:    ret
470  %1 = lshr i16 %a, 6
471  ret i16 %1
472}
473
474; We could use sext.h+srai, but slli+srai offers more opportunities for
475; compressed instructions.
; i16 arithmetic shift right by 9: slli-by-48 then srai-by-57 (sign extension
; folded into the shift pair); same code on all configs.
476define i16 @srai_i16(i16 %a) nounwind {
477; CHECK-LABEL: srai_i16:
478; CHECK:       # %bb.0:
479; CHECK-NEXT:    slli a0, a0, 48
480; CHECK-NEXT:    srai a0, a0, 57
481; CHECK-NEXT:    ret
482  %1 = ashr i16 %a, 9
483  ret i16 %1
484}
485
; (a & b) == b on i32: Zbb/Zbkb folds the and+xor compare-idiom into
; andn+seqz ((b & ~a) == 0); base RV64I keeps and+xor+seqz.
486define i1 @andn_seqz_i32(i32 signext %a, i32 signext %b) nounwind {
487; RV64I-LABEL: andn_seqz_i32:
488; RV64I:       # %bb.0:
489; RV64I-NEXT:    and a0, a0, a1
490; RV64I-NEXT:    xor a0, a0, a1
491; RV64I-NEXT:    seqz a0, a0
492; RV64I-NEXT:    ret
493;
494; RV64ZBB-ZBKB-LABEL: andn_seqz_i32:
495; RV64ZBB-ZBKB:       # %bb.0:
496; RV64ZBB-ZBKB-NEXT:    andn a0, a1, a0
497; RV64ZBB-ZBKB-NEXT:    seqz a0, a0
498; RV64ZBB-ZBKB-NEXT:    ret
499  %and = and i32 %a, %b
500  %cmpeq = icmp eq i32 %and, %b
501  ret i1 %cmpeq
502}
503
; i64 version of andn_seqz_i32: (a & b) == b becomes andn+seqz under
; Zbb/Zbkb.
504define i1 @andn_seqz_i64(i64 %a, i64 %b) nounwind {
505; RV64I-LABEL: andn_seqz_i64:
506; RV64I:       # %bb.0:
507; RV64I-NEXT:    and a0, a0, a1
508; RV64I-NEXT:    xor a0, a0, a1
509; RV64I-NEXT:    seqz a0, a0
510; RV64I-NEXT:    ret
511;
512; RV64ZBB-ZBKB-LABEL: andn_seqz_i64:
513; RV64ZBB-ZBKB:       # %bb.0:
514; RV64ZBB-ZBKB-NEXT:    andn a0, a1, a0
515; RV64ZBB-ZBKB-NEXT:    seqz a0, a0
516; RV64ZBB-ZBKB-NEXT:    ret
517  %and = and i64 %a, %b
518  %cmpeq = icmp eq i64 %and, %b
519  ret i1 %cmpeq
520}
521
; (a & b) != b on i32: inequality counterpart of andn_seqz_i32; Zbb/Zbkb
; folds to andn+snez ((b & ~a) != 0).
522define i1 @andn_snez_i32(i32 signext %a, i32 signext %b) nounwind {
523; RV64I-LABEL: andn_snez_i32:
524; RV64I:       # %bb.0:
525; RV64I-NEXT:    and a0, a0, a1
526; RV64I-NEXT:    xor a0, a0, a1
527; RV64I-NEXT:    snez a0, a0
528; RV64I-NEXT:    ret
529;
530; RV64ZBB-ZBKB-LABEL: andn_snez_i32:
531; RV64ZBB-ZBKB:       # %bb.0:
532; RV64ZBB-ZBKB-NEXT:    andn a0, a1, a0
533; RV64ZBB-ZBKB-NEXT:    snez a0, a0
534; RV64ZBB-ZBKB-NEXT:    ret
535  %and = and i32 %a, %b
536  %cmpeq = icmp ne i32 %and, %b
537  ret i1 %cmpeq
538}
539
; i64 version of andn_snez_i32: (a & b) != b becomes andn+snez under
; Zbb/Zbkb.
540define i1 @andn_snez_i64(i64 %a, i64 %b) nounwind {
541; RV64I-LABEL: andn_snez_i64:
542; RV64I:       # %bb.0:
543; RV64I-NEXT:    and a0, a0, a1
544; RV64I-NEXT:    xor a0, a0, a1
545; RV64I-NEXT:    snez a0, a0
546; RV64I-NEXT:    ret
547;
548; RV64ZBB-ZBKB-LABEL: andn_snez_i64:
549; RV64ZBB-ZBKB:       # %bb.0:
550; RV64ZBB-ZBKB-NEXT:    andn a0, a1, a0
551; RV64ZBB-ZBKB-NEXT:    snez a0, a0
552; RV64ZBB-ZBKB-NEXT:    ret
553  %and = and i64 %a, %b
554  %cmpeq = icmp ne i64 %and, %b
555  ret i1 %cmpeq
556}
557