; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I

; These tests are identical to those in alu32.ll but operate on i8. They check
; that legalisation of these non-native types doesn't introduce unnecessary
; inefficiencies.
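;
; For instance (an illustrative C equivalent, not part of the original test),
;   char addi_c(char a) { return a + 1; }
; should need only a single addi: the upper bits of the result are never
; observed, so no extra extension of the i8 value is required.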

define i8 @addi(i8 %a) nounwind {
; RV32I-LABEL: addi:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a0, a0, 1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: addi:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi a0, a0, 1
; RV64I-NEXT:    ret
  %1 = add i8 %a, 1
  ret i8 %1
}

define i8 @slti(i8 %a) nounwind {
; RV32I-LABEL: slti:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    slti a0, a0, 2
; RV32I-NEXT:    ret
;
; RV64I-LABEL: slti:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    slti a0, a0, 2
; RV64I-NEXT:    ret
  %1 = icmp slt i8 %a, 2
  %2 = zext i1 %1 to i8
  ret i8 %2
}

define i8 @sltiu(i8 %a) nounwind {
; RV32I-LABEL: sltiu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    andi a0, a0, 255
; RV32I-NEXT:    sltiu a0, a0, 3
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sltiu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    sltiu a0, a0, 3
; RV64I-NEXT:    ret
  %1 = icmp ult i8 %a, 3
  %2 = zext i1 %1 to i8
  ret i8 %2
}

; Make sure we avoid an AND if the input of an unsigned compare is known to
; be sign-extended. This can occur due to InstCombine canonicalizing
; x s>= 0 && x s< 10 to x u< 10.
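;
; An illustrative C source for this pattern (an assumption, not taken from
; this test) would be:
;   int in_range(signed char x) { return x >= 0 && x < 10; }
; Because %a is sign-extended, x u< 10 gives the same result as it would on
; the zero-extended value, so the usual andi a0, a0, 255 can be dropped.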
define i8 @sltiu_signext(i8 signext %a) nounwind {
; RV32I-LABEL: sltiu_signext:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sltiu a0, a0, 10
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sltiu_signext:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sltiu a0, a0, 10
; RV64I-NEXT:    ret
  %1 = icmp ult i8 %a, 10
  %2 = zext i1 %1 to i8
  ret i8 %2
}

define i8 @xori(i8 %a) nounwind {
; RV32I-LABEL: xori:
; RV32I:       # %bb.0:
; RV32I-NEXT:    xori a0, a0, 4
; RV32I-NEXT:    ret
;
; RV64I-LABEL: xori:
; RV64I:       # %bb.0:
; RV64I-NEXT:    xori a0, a0, 4
; RV64I-NEXT:    ret
  %1 = xor i8 %a, 4
  ret i8 %1
}

define i8 @ori(i8 %a) nounwind {
; RV32I-LABEL: ori:
; RV32I:       # %bb.0:
; RV32I-NEXT:    ori a0, a0, 5
; RV32I-NEXT:    ret
;
; RV64I-LABEL: ori:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ori a0, a0, 5
; RV64I-NEXT:    ret
  %1 = or i8 %a, 5
  ret i8 %1
}

define i8 @andi(i8 %a) nounwind {
; RV32I-LABEL: andi:
; RV32I:       # %bb.0:
; RV32I-NEXT:    andi a0, a0, 6
; RV32I-NEXT:    ret
;
; RV64I-LABEL: andi:
; RV64I:       # %bb.0:
; RV64I-NEXT:    andi a0, a0, 6
; RV64I-NEXT:    ret
  %1 = and i8 %a, 6
  ret i8 %1
}

define i8 @slli(i8 %a) nounwind {
; RV32I-LABEL: slli:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 7
; RV32I-NEXT:    ret
;
; RV64I-LABEL: slli:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 7
; RV64I-NEXT:    ret
  %1 = shl i8 %a, 7
  ret i8 %1
}

define i8 @srli(i8 %a) nounwind {
; RV32I-LABEL: srli:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srli a0, a0, 30
; RV32I-NEXT:    ret
;
; RV64I-LABEL: srli:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srli a0, a0, 62
; RV64I-NEXT:    ret
  %1 = lshr i8 %a, 6
  ret i8 %1
}

define i8 @srai(i8 %a) nounwind {
; RV32I-LABEL: srai:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a0, a0, 29
; RV32I-NEXT:    ret
;
; RV64I-LABEL: srai:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a0, a0, 61
; RV64I-NEXT:    ret
  %1 = ashr i8 %a, 5
  ret i8 %1
}
define i8 @add(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: add:
; RV32I:       # %bb.0:
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
  %1 = add i8 %a, %b
  ret i8 %1
}

define i8 @sub(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: sub:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sub:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
  %1 = sub i8 %a, %b
  ret i8 %1
}

define i8 @sll(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: sll:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sll:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sll a0, a0, a1
; RV64I-NEXT:    ret
  %1 = shl i8 %a, %b
  ret i8 %1
}

; Test the pattern we get from C integer promotion.
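; An illustrative C source (an assumption, not taken from this test) would be:
;   void sll_ext_c(unsigned char a, int b, unsigned char *p) { *p = a << b; }
; The i8 operand is promoted to i32 for the shift and truncated again by the
; store, so RV64 can use sllw and neither target needs to mask the result.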
define void @sll_ext(i8 %a, i32 signext %b, ptr %p) nounwind {
; RV32I-LABEL: sll_ext:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    sb a0, 0(a2)
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sll_ext:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    sb a0, 0(a2)
; RV64I-NEXT:    ret
  %1 = zext i8 %a to i32
  %2 = shl i32 %1, %b
  %3 = trunc i32 %2 to i8
  store i8 %3, ptr %p
  ret void
}

; Test the pattern we get from C integer promotion. This time with
; poison-generating flags.
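; The nuw/nsw flags can make the shl produce poison, so they must be dropped
; during lowering instead of being relied upon; the generated code should
; match @sll_ext above.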
define void @sll_ext_drop_poison(i8 %a, i32 signext %b, ptr %p) nounwind {
; RV32I-LABEL: sll_ext_drop_poison:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    sb a0, 0(a2)
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sll_ext_drop_poison:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    sb a0, 0(a2)
; RV64I-NEXT:    ret
  %1 = zext i8 %a to i32
  %2 = shl nuw nsw i32 %1, %b
  %3 = trunc i32 %2 to i8
  store i8 %3, ptr %p
  ret void
}

define i8 @slt(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: slt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a1, 24
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a1, a1, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    slt a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: slt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 56
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a1, a1, 56
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    slt a0, a0, a1
; RV64I-NEXT:    ret
  %1 = icmp slt i8 %a, %b
  %2 = zext i1 %1 to i8
  ret i8 %2
}

define i8 @sltu(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: sltu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    andi a1, a1, 255
; RV32I-NEXT:    andi a0, a0, 255
; RV32I-NEXT:    sltu a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sltu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    andi a1, a1, 255
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    sltu a0, a0, a1
; RV64I-NEXT:    ret
  %1 = icmp ult i8 %a, %b
  %2 = zext i1 %1 to i8
  ret i8 %2
}

define i8 @xor(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: xor:
; RV32I:       # %bb.0:
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: xor:
; RV64I:       # %bb.0:
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    ret
  %1 = xor i8 %a, %b
  ret i8 %1
}

define i8 @srl(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: srl:
; RV32I:       # %bb.0:
; RV32I-NEXT:    andi a0, a0, 255
; RV32I-NEXT:    srl a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: srl:
; RV64I:       # %bb.0:
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    srl a0, a0, a1
; RV64I-NEXT:    ret
  %1 = lshr i8 %a, %b
  ret i8 %1
}

define i8 @sra(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: sra:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    sra a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sra:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    sra a0, a0, a1
; RV64I-NEXT:    ret
  %1 = ashr i8 %a, %b
  ret i8 %1
}

define i8 @or(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: or:
; RV32I:       # %bb.0:
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: or:
; RV64I:       # %bb.0:
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
  %1 = or i8 %a, %b
  ret i8 %1
}

define i8 @and(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: and:
; RV32I:       # %bb.0:
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: and:
; RV64I:       # %bb.0:
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
  %1 = and i8 %a, %b
  ret i8 %1
}