xref: /llvm-project/llvm/test/CodeGen/RISCV/alu16.ll (revision 9122c5235ec85ce0c0ad337e862b006e7b349d84)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
3; RUN:   | FileCheck %s -check-prefix=RV32I
4; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
5; RUN:   | FileCheck %s -check-prefix=RV64I
6
7; These tests are identical to those in alu32.ll but operate on i16. They check
8; that legalisation of these non-native types doesn't introduce unnecessary
9; inefficiencies.
10
11define i16 @addi(i16 %a) nounwind {
12; RV32I-LABEL: addi:
13; RV32I:       # %bb.0:
14; RV32I-NEXT:    addi a0, a0, 1
15; RV32I-NEXT:    ret
16;
17; RV64I-LABEL: addi:
18; RV64I:       # %bb.0:
19; RV64I-NEXT:    addi a0, a0, 1
20; RV64I-NEXT:    ret
; i16 add-with-immediate: the CHECK lines above expect a single addi on both
; targets, i.e. legalisation must not insert any extension of the argument.
21  %1 = add i16 %a, 1
22  ret i16 %1
23}
24
25define i16 @slti(i16 %a) nounwind {
26; RV32I-LABEL: slti:
27; RV32I:       # %bb.0:
28; RV32I-NEXT:    slli a0, a0, 16
29; RV32I-NEXT:    srai a0, a0, 16
30; RV32I-NEXT:    slti a0, a0, 2
31; RV32I-NEXT:    ret
32;
33; RV64I-LABEL: slti:
34; RV64I:       # %bb.0:
35; RV64I-NEXT:    slli a0, a0, 48
36; RV64I-NEXT:    srai a0, a0, 48
37; RV64I-NEXT:    slti a0, a0, 2
38; RV64I-NEXT:    ret
; Signed i16 compare with an immediate: the operand is sign-extended with a
; slli+srai pair (shift amount = XLEN-16) before the slti, per the checks above.
39  %1 = icmp slt i16 %a, 2
40  %2 = zext i1 %1 to i16
41  ret i16 %2
42}
43
44define i16 @sltiu(i16 %a) nounwind {
45; RV32I-LABEL: sltiu:
46; RV32I:       # %bb.0:
47; RV32I-NEXT:    slli a0, a0, 16
48; RV32I-NEXT:    srli a0, a0, 16
49; RV32I-NEXT:    sltiu a0, a0, 3
50; RV32I-NEXT:    ret
51;
52; RV64I-LABEL: sltiu:
53; RV64I:       # %bb.0:
54; RV64I-NEXT:    slli a0, a0, 48
55; RV64I-NEXT:    srli a0, a0, 48
56; RV64I-NEXT:    sltiu a0, a0, 3
57; RV64I-NEXT:    ret
; Unsigned i16 compare with an immediate: the checks expect zero-extension via
; a slli+srli pair (rather than an AND with a materialized 0xffff) before sltiu.
58  %1 = icmp ult i16 %a, 3
59  %2 = zext i1 %1 to i16
60  ret i16 %2
61}
62
63; Make sure we avoid an AND, if the input of an unsigned compare is known
64; to be sign extended. This can occur due to InstCombine canonicalizing
65; x s>= 0 && x s< 10 to x u< 10.
66define i16 @sltiu_signext(i16 signext %a) nounwind {
67; RV32I-LABEL: sltiu_signext:
68; RV32I:       # %bb.0:
69; RV32I-NEXT:    sltiu a0, a0, 10
70; RV32I-NEXT:    ret
71;
72; RV64I-LABEL: sltiu_signext:
73; RV64I:       # %bb.0:
74; RV64I-NEXT:    sltiu a0, a0, 10
75; RV64I-NEXT:    ret
; Because the argument carries the signext attribute, the checks expect the
; unsigned compare to be a lone sltiu with no masking/extension instructions.
76  %1 = icmp ult i16 %a, 10
77  %2 = zext i1 %1 to i16
78  ret i16 %2
79}
80
81define i16 @xori(i16 %a) nounwind {
82; RV32I-LABEL: xori:
83; RV32I:       # %bb.0:
84; RV32I-NEXT:    xori a0, a0, 4
85; RV32I-NEXT:    ret
86;
87; RV64I-LABEL: xori:
88; RV64I:       # %bb.0:
89; RV64I-NEXT:    xori a0, a0, 4
90; RV64I-NEXT:    ret
; i16 xor-with-immediate: expected to select a single xori, no extension.
91  %1 = xor i16 %a, 4
92  ret i16 %1
93}
94
95define i16 @ori(i16 %a) nounwind {
96; RV32I-LABEL: ori:
97; RV32I:       # %bb.0:
98; RV32I-NEXT:    ori a0, a0, 5
99; RV32I-NEXT:    ret
100;
101; RV64I-LABEL: ori:
102; RV64I:       # %bb.0:
103; RV64I-NEXT:    ori a0, a0, 5
104; RV64I-NEXT:    ret
; i16 or-with-immediate: expected to select a single ori, no extension.
105  %1 = or i16 %a, 5
106  ret i16 %1
107}
108
109define i16 @andi(i16 %a) nounwind {
110; RV32I-LABEL: andi:
111; RV32I:       # %bb.0:
112; RV32I-NEXT:    andi a0, a0, 6
113; RV32I-NEXT:    ret
114;
115; RV64I-LABEL: andi:
116; RV64I:       # %bb.0:
117; RV64I-NEXT:    andi a0, a0, 6
118; RV64I-NEXT:    ret
; i16 and-with-immediate: expected to select a single andi, no extension.
119  %1 = and i16 %a, 6
120  ret i16 %1
121}
122
123define i16 @slli(i16 %a) nounwind {
124; RV32I-LABEL: slli:
125; RV32I:       # %bb.0:
126; RV32I-NEXT:    slli a0, a0, 7
127; RV32I-NEXT:    ret
128;
129; RV64I-LABEL: slli:
130; RV64I:       # %bb.0:
131; RV64I-NEXT:    slli a0, a0, 7
132; RV64I-NEXT:    ret
; i16 shift-left by constant: a left shift needs no pre-extension of the
; source, so the checks expect a single slli.
133  %1 = shl i16 %a, 7
134  ret i16 %1
135}
136
137define i16 @srli(i16 %a) nounwind {
138; RV32I-LABEL: srli:
139; RV32I:       # %bb.0:
140; RV32I-NEXT:    slli a0, a0, 16
141; RV32I-NEXT:    srli a0, a0, 22
142; RV32I-NEXT:    ret
143;
144; RV64I-LABEL: srli:
145; RV64I:       # %bb.0:
146; RV64I-NEXT:    slli a0, a0, 48
147; RV64I-NEXT:    srli a0, a0, 54
148; RV64I-NEXT:    ret
; i16 logical shift right by 6: the checks expect the zero-extension to be
; folded into the shift pair — slli by XLEN-16, then srli by (XLEN-16)+6.
149  %1 = lshr i16 %a, 6
150  ret i16 %1
151}
152
153define i16 @srai(i16 %a) nounwind {
154; RV32I-LABEL: srai:
155; RV32I:       # %bb.0:
156; RV32I-NEXT:    slli a0, a0, 16
157; RV32I-NEXT:    srai a0, a0, 25
158; RV32I-NEXT:    ret
159;
160; RV64I-LABEL: srai:
161; RV64I:       # %bb.0:
162; RV64I-NEXT:    slli a0, a0, 48
163; RV64I-NEXT:    srai a0, a0, 57
164; RV64I-NEXT:    ret
; i16 arithmetic shift right by 9: the checks expect the sign-extension to be
; folded into the shift pair — slli by XLEN-16, then srai by (XLEN-16)+9.
165  %1 = ashr i16 %a, 9
166  ret i16 %1
167}
168
169
170define i16 @add(i16 %a, i16 %b) nounwind {
171; RV32I-LABEL: add:
172; RV32I:       # %bb.0:
173; RV32I-NEXT:    add a0, a0, a1
174; RV32I-NEXT:    ret
175;
176; RV64I-LABEL: add:
177; RV64I:       # %bb.0:
178; RV64I-NEXT:    add a0, a0, a1
179; RV64I-NEXT:    ret
; Register-register i16 add: expected to select a single add, no extension.
180  %1 = add i16 %a, %b
181  ret i16 %1
182}
183
184define i16 @sub(i16 %a, i16 %b) nounwind {
185; RV32I-LABEL: sub:
186; RV32I:       # %bb.0:
187; RV32I-NEXT:    sub a0, a0, a1
188; RV32I-NEXT:    ret
189;
190; RV64I-LABEL: sub:
191; RV64I:       # %bb.0:
192; RV64I-NEXT:    sub a0, a0, a1
193; RV64I-NEXT:    ret
; Register-register i16 sub: expected to select a single sub, no extension.
194  %1 = sub i16 %a, %b
195  ret i16 %1
196}
197
198define i16 @sll(i16 %a, i16 %b) nounwind {
199; RV32I-LABEL: sll:
200; RV32I:       # %bb.0:
201; RV32I-NEXT:    sll a0, a0, a1
202; RV32I-NEXT:    ret
203;
204; RV64I-LABEL: sll:
205; RV64I:       # %bb.0:
206; RV64I-NEXT:    sll a0, a0, a1
207; RV64I-NEXT:    ret
; Variable i16 shift left: expected to select a single sll with neither the
; value nor the shift amount needing an explicit extension.
208  %1 = shl i16 %a, %b
209  ret i16 %1
210}
211
212; Test the pattern we get from C integer promotion.
213define void @sll_ext(i16 %a, i32 signext %b, ptr %p) nounwind {
214; RV32I-LABEL: sll_ext:
215; RV32I:       # %bb.0:
216; RV32I-NEXT:    sll a0, a0, a1
217; RV32I-NEXT:    sh a0, 0(a2)
218; RV32I-NEXT:    ret
219;
220; RV64I-LABEL: sll_ext:
221; RV64I:       # %bb.0:
222; RV64I-NEXT:    sllw a0, a0, a1
223; RV64I-NEXT:    sh a0, 0(a2)
224; RV64I-NEXT:    ret
; zext-shl-trunc-store of an i16 (C promotion shape): the checks expect just a
; shift (sllw on RV64) plus sh, with no extra extension instructions.
225  %1 = zext i16 %a to i32
226  %2 = shl i32 %1, %b
227  %3 = trunc i32 %2 to i16
228  store i16 %3, ptr %p
229  ret void
230}
231
232; Test the pattern we get from C integer promotion. This time with poison
233; generating flags.
234define void @sll_ext_drop_poison(i16 %a, i32 signext %b, ptr %p) nounwind {
235; RV32I-LABEL: sll_ext_drop_poison:
236; RV32I:       # %bb.0:
237; RV32I-NEXT:    sll a0, a0, a1
238; RV32I-NEXT:    sh a0, 0(a2)
239; RV32I-NEXT:    ret
240;
241; RV64I-LABEL: sll_ext_drop_poison:
242; RV64I:       # %bb.0:
243; RV64I-NEXT:    sllw a0, a0, a1
244; RV64I-NEXT:    sh a0, 0(a2)
245; RV64I-NEXT:    ret
; Same shape as sll_ext but with nuw/nsw on the shl: the checks expect
; identical codegen, i.e. the poison-generating flags must not block the
; sll/sllw selection.
246  %1 = zext i16 %a to i32
247  %2 = shl nuw nsw i32 %1, %b
248  %3 = trunc i32 %2 to i16
249  store i16 %3, ptr %p
250  ret void
251}
252
253define i16 @slt(i16 %a, i16 %b) nounwind {
254; RV32I-LABEL: slt:
255; RV32I:       # %bb.0:
256; RV32I-NEXT:    slli a1, a1, 16
257; RV32I-NEXT:    slli a0, a0, 16
258; RV32I-NEXT:    srai a1, a1, 16
259; RV32I-NEXT:    srai a0, a0, 16
260; RV32I-NEXT:    slt a0, a0, a1
261; RV32I-NEXT:    ret
262;
263; RV64I-LABEL: slt:
264; RV64I:       # %bb.0:
265; RV64I-NEXT:    slli a1, a1, 48
266; RV64I-NEXT:    slli a0, a0, 48
267; RV64I-NEXT:    srai a1, a1, 48
268; RV64I-NEXT:    srai a0, a0, 48
269; RV64I-NEXT:    slt a0, a0, a1
270; RV64I-NEXT:    ret
; Signed register-register i16 compare: both operands are sign-extended with
; slli+srai pairs before the slt, per the checks above.
271  %1 = icmp slt i16 %a, %b
272  %2 = zext i1 %1 to i16
273  ret i16 %2
274}
275
276define i16 @sltu(i16 %a, i16 %b) nounwind {
277; RV32I-LABEL: sltu:
278; RV32I:       # %bb.0:
279; RV32I-NEXT:    lui a2, 16
280; RV32I-NEXT:    addi a2, a2, -1
281; RV32I-NEXT:    and a1, a1, a2
282; RV32I-NEXT:    and a0, a0, a2
283; RV32I-NEXT:    sltu a0, a0, a1
284; RV32I-NEXT:    ret
285;
286; RV64I-LABEL: sltu:
287; RV64I:       # %bb.0:
288; RV64I-NEXT:    lui a2, 16
289; RV64I-NEXT:    addiw a2, a2, -1
290; RV64I-NEXT:    and a1, a1, a2
291; RV64I-NEXT:    and a0, a0, a2
292; RV64I-NEXT:    sltu a0, a0, a1
293; RV64I-NEXT:    ret
; Unsigned register-register i16 compare: 0xffff is materialized once
; (lui 16 + addi/addiw -1) and both operands are masked before the sltu.
294  %1 = icmp ult i16 %a, %b
295  %2 = zext i1 %1 to i16
296  ret i16 %2
297}
298
299define i16 @xor(i16 %a, i16 %b) nounwind {
300; RV32I-LABEL: xor:
301; RV32I:       # %bb.0:
302; RV32I-NEXT:    xor a0, a0, a1
303; RV32I-NEXT:    ret
304;
305; RV64I-LABEL: xor:
306; RV64I:       # %bb.0:
307; RV64I-NEXT:    xor a0, a0, a1
308; RV64I-NEXT:    ret
; Register-register i16 xor: expected to select a single xor, no extension.
309  %1 = xor i16 %a, %b
310  ret i16 %1
311}
312
313define i16 @srl(i16 %a, i16 %b) nounwind {
314; RV32I-LABEL: srl:
315; RV32I:       # %bb.0:
316; RV32I-NEXT:    slli a0, a0, 16
317; RV32I-NEXT:    srli a0, a0, 16
318; RV32I-NEXT:    srl a0, a0, a1
319; RV32I-NEXT:    ret
320;
321; RV64I-LABEL: srl:
322; RV64I:       # %bb.0:
323; RV64I-NEXT:    slli a0, a0, 48
324; RV64I-NEXT:    srli a0, a0, 48
325; RV64I-NEXT:    srl a0, a0, a1
326; RV64I-NEXT:    ret
; Variable i16 logical shift right: only the shifted value needs zero-extension
; (slli+srli pair); the shift amount is used as-is, per the checks above.
327  %1 = lshr i16 %a, %b
328  ret i16 %1
329}
330
331define i16 @sra(i16 %a, i16 %b) nounwind {
332; RV32I-LABEL: sra:
333; RV32I:       # %bb.0:
334; RV32I-NEXT:    slli a0, a0, 16
335; RV32I-NEXT:    srai a0, a0, 16
336; RV32I-NEXT:    sra a0, a0, a1
337; RV32I-NEXT:    ret
338;
339; RV64I-LABEL: sra:
340; RV64I:       # %bb.0:
341; RV64I-NEXT:    slli a0, a0, 48
342; RV64I-NEXT:    srai a0, a0, 48
343; RV64I-NEXT:    sra a0, a0, a1
344; RV64I-NEXT:    ret
; Variable i16 arithmetic shift right: only the shifted value needs
; sign-extension (slli+srai pair); the shift amount is used as-is.
345  %1 = ashr i16 %a, %b
346  ret i16 %1
347}
348
349define i16 @or(i16 %a, i16 %b) nounwind {
350; RV32I-LABEL: or:
351; RV32I:       # %bb.0:
352; RV32I-NEXT:    or a0, a0, a1
353; RV32I-NEXT:    ret
354;
355; RV64I-LABEL: or:
356; RV64I:       # %bb.0:
357; RV64I-NEXT:    or a0, a0, a1
358; RV64I-NEXT:    ret
; Register-register i16 or: expected to select a single or, no extension.
359  %1 = or i16 %a, %b
360  ret i16 %1
361}
362
363define i16 @and(i16 %a, i16 %b) nounwind {
364; RV32I-LABEL: and:
365; RV32I:       # %bb.0:
366; RV32I-NEXT:    and a0, a0, a1
367; RV32I-NEXT:    ret
368;
369; RV64I-LABEL: and:
370; RV64I:       # %bb.0:
371; RV64I-NEXT:    and a0, a0, a1
372; RV64I-NEXT:    ret
; Register-register i16 and: expected to select a single and, no extension.
373  %1 = and i16 %a, %b
374  ret i16 %1
375}
376