; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 < %s | FileCheck %s --check-prefixes=RV32
; RUN: llc -mtriple=riscv64 < %s | FileCheck %s --check-prefixes=RV64
; RUN: llc -mtriple=riscv32 -mattr=+m < %s | FileCheck %s --check-prefixes=RV32M
; RUN: llc -mtriple=riscv64 -mattr=+m < %s | FileCheck %s --check-prefixes=RV64M
; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=RV32MV
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=RV64MV
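; These tests check that urem-by-constant equality comparisons on illegal
; (non-power-of-two width) integer types are lowered without a division,
; using the multiply-by-modular-inverse fold: for odd C,
; X urem C == 0 iff X * C^-1 (mod 2^N) <= floor((2^N - 1) / C),
; where N is the bit width. Even divisors add a rotate (see below).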
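; i13 urem by 5: 3277 (lui 1 / addi -819) is the inverse of 5 mod 2^13,
; since 5 * 3277 = 2 * 8192 + 1. The slli/srli pair truncates the product
; to 13 bits, and the remainder is zero iff the result is at most
; floor(8191 / 5) = 1638, hence the sltiu threshold of 1639.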
define i1 @test_urem_odd(i13 %X) nounwind {
; RV32-LABEL: test_urem_odd:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    lui a1, 1
; RV32-NEXT:    addi a1, a1, -819
; RV32-NEXT:    call __mulsi3
; RV32-NEXT:    slli a0, a0, 19
; RV32-NEXT:    srli a0, a0, 19
; RV32-NEXT:    sltiu a0, a0, 1639
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_odd:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    lui a1, 1
; RV64-NEXT:    addiw a1, a1, -819
; RV64-NEXT:    call __muldi3
; RV64-NEXT:    slli a0, a0, 51
; RV64-NEXT:    srli a0, a0, 51
; RV64-NEXT:    sltiu a0, a0, 1639
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
;
; RV32M-LABEL: test_urem_odd:
; RV32M:       # %bb.0:
; RV32M-NEXT:    lui a1, 1
; RV32M-NEXT:    addi a1, a1, -819
; RV32M-NEXT:    mul a0, a0, a1
; RV32M-NEXT:    slli a0, a0, 19
; RV32M-NEXT:    srli a0, a0, 19
; RV32M-NEXT:    sltiu a0, a0, 1639
; RV32M-NEXT:    ret
;
; RV64M-LABEL: test_urem_odd:
; RV64M:       # %bb.0:
; RV64M-NEXT:    lui a1, 1
; RV64M-NEXT:    addi a1, a1, -819
; RV64M-NEXT:    mul a0, a0, a1
; RV64M-NEXT:    slli a0, a0, 51
; RV64M-NEXT:    srli a0, a0, 51
; RV64M-NEXT:    sltiu a0, a0, 1639
; RV64M-NEXT:    ret
;
; RV32MV-LABEL: test_urem_odd:
; RV32MV:       # %bb.0:
; RV32MV-NEXT:    lui a1, 1
; RV32MV-NEXT:    addi a1, a1, -819
; RV32MV-NEXT:    mul a0, a0, a1
; RV32MV-NEXT:    slli a0, a0, 19
; RV32MV-NEXT:    srli a0, a0, 19
; RV32MV-NEXT:    sltiu a0, a0, 1639
; RV32MV-NEXT:    ret
;
; RV64MV-LABEL: test_urem_odd:
; RV64MV:       # %bb.0:
; RV64MV-NEXT:    lui a1, 1
; RV64MV-NEXT:    addi a1, a1, -819
; RV64MV-NEXT:    mul a0, a0, a1
; RV64MV-NEXT:    slli a0, a0, 51
; RV64MV-NEXT:    srli a0, a0, 51
; RV64MV-NEXT:    sltiu a0, a0, 1639
; RV64MV-NEXT:    ret
  %urem = urem i13 %X, 5
  %cmp = icmp eq i13 %urem, 0
  ret i1 %cmp
}
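; i27 urem by 14 = 2 * 7: the input is multiplied by 115043767
; (lui 28087 / addi -585), the inverse of 7 mod 2^27, then rotated right
; by one bit within 27 bits to divide out the factor of two. The remainder
; is zero iff the result is at most floor((2^27 - 1) / 14) = 9586980,
; hence the sltu bound of 9586981 (lui 2341 / addi -1755).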
define i1 @test_urem_even(i27 %X) nounwind {
; RV32-LABEL: test_urem_even:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    lui a1, 28087
; RV32-NEXT:    addi a1, a1, -585
; RV32-NEXT:    call __mulsi3
; RV32-NEXT:    slli a1, a0, 26
; RV32-NEXT:    slli a0, a0, 5
; RV32-NEXT:    srli a0, a0, 6
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    lui a1, 2341
; RV32-NEXT:    slli a0, a0, 5
; RV32-NEXT:    srli a0, a0, 5
; RV32-NEXT:    addi a1, a1, -1755
; RV32-NEXT:    sltu a0, a0, a1
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_even:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    lui a1, 28087
; RV64-NEXT:    addiw a1, a1, -585
; RV64-NEXT:    call __muldi3
; RV64-NEXT:    slli a1, a0, 26
; RV64-NEXT:    slli a0, a0, 37
; RV64-NEXT:    srli a0, a0, 38
; RV64-NEXT:    or a0, a0, a1
; RV64-NEXT:    lui a1, 2341
; RV64-NEXT:    slli a0, a0, 37
; RV64-NEXT:    srli a0, a0, 37
; RV64-NEXT:    addiw a1, a1, -1755
; RV64-NEXT:    sltu a0, a0, a1
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
;
; RV32M-LABEL: test_urem_even:
; RV32M:       # %bb.0:
; RV32M-NEXT:    lui a1, 28087
; RV32M-NEXT:    addi a1, a1, -585
; RV32M-NEXT:    mul a0, a0, a1
; RV32M-NEXT:    slli a1, a0, 26
; RV32M-NEXT:    slli a0, a0, 5
; RV32M-NEXT:    srli a0, a0, 6
; RV32M-NEXT:    or a0, a0, a1
; RV32M-NEXT:    lui a1, 2341
; RV32M-NEXT:    slli a0, a0, 5
; RV32M-NEXT:    srli a0, a0, 5
; RV32M-NEXT:    addi a1, a1, -1755
; RV32M-NEXT:    sltu a0, a0, a1
; RV32M-NEXT:    ret
;
; RV64M-LABEL: test_urem_even:
; RV64M:       # %bb.0:
; RV64M-NEXT:    lui a1, 28087
; RV64M-NEXT:    addi a1, a1, -585
; RV64M-NEXT:    mul a0, a0, a1
; RV64M-NEXT:    slli a1, a0, 26
; RV64M-NEXT:    slli a0, a0, 37
; RV64M-NEXT:    srli a0, a0, 38
; RV64M-NEXT:    or a0, a0, a1
; RV64M-NEXT:    lui a1, 2341
; RV64M-NEXT:    slli a0, a0, 37
; RV64M-NEXT:    srli a0, a0, 37
; RV64M-NEXT:    addiw a1, a1, -1755
; RV64M-NEXT:    sltu a0, a0, a1
; RV64M-NEXT:    ret
;
; RV32MV-LABEL: test_urem_even:
; RV32MV:       # %bb.0:
; RV32MV-NEXT:    lui a1, 28087
; RV32MV-NEXT:    addi a1, a1, -585
; RV32MV-NEXT:    mul a0, a0, a1
; RV32MV-NEXT:    slli a1, a0, 26
; RV32MV-NEXT:    slli a0, a0, 5
; RV32MV-NEXT:    srli a0, a0, 6
; RV32MV-NEXT:    or a0, a0, a1
; RV32MV-NEXT:    lui a1, 2341
; RV32MV-NEXT:    slli a0, a0, 5
; RV32MV-NEXT:    srli a0, a0, 5
; RV32MV-NEXT:    addi a1, a1, -1755
; RV32MV-NEXT:    sltu a0, a0, a1
; RV32MV-NEXT:    ret
;
; RV64MV-LABEL: test_urem_even:
; RV64MV:       # %bb.0:
; RV64MV-NEXT:    lui a1, 28087
; RV64MV-NEXT:    addi a1, a1, -585
; RV64MV-NEXT:    mul a0, a0, a1
; RV64MV-NEXT:    slli a1, a0, 26
; RV64MV-NEXT:    slli a0, a0, 37
; RV64MV-NEXT:    srli a0, a0, 38
; RV64MV-NEXT:    or a0, a0, a1
; RV64MV-NEXT:    lui a1, 2341
; RV64MV-NEXT:    slli a0, a0, 37
; RV64MV-NEXT:    srli a0, a0, 37
; RV64MV-NEXT:    addiw a1, a1, -1755
; RV64MV-NEXT:    sltu a0, a0, a1
; RV64MV-NEXT:    ret
  %urem = urem i27 %X, 14
  %cmp = icmp eq i27 %urem, 0
  ret i1 %cmp
}
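; i4 urem by 5 with setne: the inverse of 5 mod 2^4 is 13 = -3, so the
; multiply is strength-reduced to -X - 2X (neg/slli/sub). After masking
; to 4 bits, the remainder is nonzero iff the result exceeds
; floor(15 / 5) = 3, hence sltiu 4 followed by xori 1.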
define i1 @test_urem_odd_setne(i4 %X) nounwind {
; RV32-LABEL: test_urem_odd_setne:
; RV32:       # %bb.0:
; RV32-NEXT:    slli a1, a0, 1
; RV32-NEXT:    neg a0, a0
; RV32-NEXT:    sub a0, a0, a1
; RV32-NEXT:    andi a0, a0, 15
; RV32-NEXT:    sltiu a0, a0, 4
; RV32-NEXT:    xori a0, a0, 1
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_odd_setne:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a1, a0, 1
; RV64-NEXT:    negw a0, a0
; RV64-NEXT:    subw a0, a0, a1
; RV64-NEXT:    andi a0, a0, 15
; RV64-NEXT:    sltiu a0, a0, 4
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
;
; RV32M-LABEL: test_urem_odd_setne:
; RV32M:       # %bb.0:
; RV32M-NEXT:    slli a1, a0, 1
; RV32M-NEXT:    neg a0, a0
; RV32M-NEXT:    sub a0, a0, a1
; RV32M-NEXT:    andi a0, a0, 15
; RV32M-NEXT:    sltiu a0, a0, 4
; RV32M-NEXT:    xori a0, a0, 1
; RV32M-NEXT:    ret
;
; RV64M-LABEL: test_urem_odd_setne:
; RV64M:       # %bb.0:
; RV64M-NEXT:    slli a1, a0, 1
; RV64M-NEXT:    negw a0, a0
; RV64M-NEXT:    subw a0, a0, a1
; RV64M-NEXT:    andi a0, a0, 15
; RV64M-NEXT:    sltiu a0, a0, 4
; RV64M-NEXT:    xori a0, a0, 1
; RV64M-NEXT:    ret
;
; RV32MV-LABEL: test_urem_odd_setne:
; RV32MV:       # %bb.0:
; RV32MV-NEXT:    slli a1, a0, 1
; RV32MV-NEXT:    neg a0, a0
; RV32MV-NEXT:    sub a0, a0, a1
; RV32MV-NEXT:    andi a0, a0, 15
; RV32MV-NEXT:    sltiu a0, a0, 4
; RV32MV-NEXT:    xori a0, a0, 1
; RV32MV-NEXT:    ret
;
; RV64MV-LABEL: test_urem_odd_setne:
; RV64MV:       # %bb.0:
; RV64MV-NEXT:    slli a1, a0, 1
; RV64MV-NEXT:    negw a0, a0
; RV64MV-NEXT:    subw a0, a0, a1
; RV64MV-NEXT:    andi a0, a0, 15
; RV64MV-NEXT:    sltiu a0, a0, 4
; RV64MV-NEXT:    xori a0, a0, 1
; RV64MV-NEXT:    ret
  %urem = urem i4 %X, 5
  %cmp = icmp ne i4 %urem, 0
  ret i1 %cmp
}
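; i9 urem by -5, i.e. by 507 when taken as unsigned: 307 is the inverse
; of 507 mod 2^9 (507 * 307 = 304 * 512 + 1), and floor(511 / 507) = 1
; gives the sltiu threshold of 2 before the setne inversion.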
define i1 @test_urem_negative_odd(i9 %X) nounwind {
; RV32-LABEL: test_urem_negative_odd:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    li a1, 307
; RV32-NEXT:    call __mulsi3
; RV32-NEXT:    andi a0, a0, 511
; RV32-NEXT:    sltiu a0, a0, 2
; RV32-NEXT:    xori a0, a0, 1
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_negative_odd:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    li a1, 307
; RV64-NEXT:    call __muldi3
; RV64-NEXT:    andi a0, a0, 511
; RV64-NEXT:    sltiu a0, a0, 2
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
;
; RV32M-LABEL: test_urem_negative_odd:
; RV32M:       # %bb.0:
; RV32M-NEXT:    li a1, 307
; RV32M-NEXT:    mul a0, a0, a1
; RV32M-NEXT:    andi a0, a0, 511
; RV32M-NEXT:    sltiu a0, a0, 2
; RV32M-NEXT:    xori a0, a0, 1
; RV32M-NEXT:    ret
;
; RV64M-LABEL: test_urem_negative_odd:
; RV64M:       # %bb.0:
; RV64M-NEXT:    li a1, 307
; RV64M-NEXT:    mul a0, a0, a1
; RV64M-NEXT:    andi a0, a0, 511
; RV64M-NEXT:    sltiu a0, a0, 2
; RV64M-NEXT:    xori a0, a0, 1
; RV64M-NEXT:    ret
;
; RV32MV-LABEL: test_urem_negative_odd:
; RV32MV:       # %bb.0:
; RV32MV-NEXT:    li a1, 307
; RV32MV-NEXT:    mul a0, a0, a1
; RV32MV-NEXT:    andi a0, a0, 511
; RV32MV-NEXT:    sltiu a0, a0, 2
; RV32MV-NEXT:    xori a0, a0, 1
; RV32MV-NEXT:    ret
;
; RV64MV-LABEL: test_urem_negative_odd:
; RV64MV:       # %bb.0:
; RV64MV-NEXT:    li a1, 307
; RV64MV-NEXT:    mul a0, a0, a1
; RV64MV-NEXT:    andi a0, a0, 511
; RV64MV-NEXT:    sltiu a0, a0, 2
; RV64MV-NEXT:    xori a0, a0, 1
; RV64MV-NEXT:    ret
  %urem = urem i9 %X, -5
  %cmp = icmp ne i9 %urem, 0
  ret i1 %cmp
}
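; <3 x i11> urem by <6, 7, -5> compared against <0, 1, 2>: each lane is
; multiplied by the inverse mod 2^11 of the odd part of its divisor
; (683 for 6, 1463 for 7, 819 for -5 = 2043 unsigned), the comparison
; constant times the inverse is subtracted (1463 and 1638 for lanes 1
; and 2), and the even-divisor lane is rotated right by one bit before
; the unsigned compare against floor(2047 / C). The scalar configurations
; unpack and repack the lanes manually; the +v configurations do the same
; arithmetic with vector ops, loading constants from .LCPI4_0/.LCPI4_1.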
define void @test_urem_vec(ptr %X) nounwind {
; RV32-LABEL: test_urem_vec:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -32
; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32-NEXT:    mv s0, a0
; RV32-NEXT:    lbu a0, 4(a0)
; RV32-NEXT:    lw a1, 0(s0)
; RV32-NEXT:    slli a0, a0, 10
; RV32-NEXT:    srli s1, a1, 22
; RV32-NEXT:    or s1, s1, a0
; RV32-NEXT:    srli s2, a1, 11
; RV32-NEXT:    andi a0, a1, 2047
; RV32-NEXT:    li a1, 683
; RV32-NEXT:    call __mulsi3
; RV32-NEXT:    slli a1, a0, 10
; RV32-NEXT:    slli a0, a0, 21
; RV32-NEXT:    srli a0, a0, 22
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    andi a0, a0, 2047
; RV32-NEXT:    sltiu s3, a0, 342
; RV32-NEXT:    li a1, 819
; RV32-NEXT:    mv a0, s1
; RV32-NEXT:    call __mulsi3
; RV32-NEXT:    addi a0, a0, -1638
; RV32-NEXT:    andi a0, a0, 2047
; RV32-NEXT:    sltiu s1, a0, 2
; RV32-NEXT:    xori s4, s1, 1
; RV32-NEXT:    li a1, 1463
; RV32-NEXT:    mv a0, s2
; RV32-NEXT:    call __mulsi3
; RV32-NEXT:    addi a0, a0, -1463
; RV32-NEXT:    addi s3, s3, -1
; RV32-NEXT:    addi s1, s1, -1
; RV32-NEXT:    andi a0, a0, 2047
; RV32-NEXT:    andi a1, s3, 2047
; RV32-NEXT:    slli s1, s1, 22
; RV32-NEXT:    sltiu a0, a0, 293
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    andi a0, a0, 2047
; RV32-NEXT:    slli a0, a0, 11
; RV32-NEXT:    or a0, a0, s1
; RV32-NEXT:    or a0, a1, a0
; RV32-NEXT:    sw a0, 0(s0)
; RV32-NEXT:    sb s4, 4(s0)
; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 32
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_vec:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -48
; RV64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    mv s0, a0
; RV64-NEXT:    lbu a0, 4(a0)
; RV64-NEXT:    lwu a1, 0(s0)
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    or a0, a1, a0
; RV64-NEXT:    srli s1, a0, 22
; RV64-NEXT:    srli s2, a0, 11
; RV64-NEXT:    andi a0, a0, 2047
; RV64-NEXT:    li a1, 683
; RV64-NEXT:    call __muldi3
; RV64-NEXT:    slli a1, a0, 10
; RV64-NEXT:    slli a0, a0, 53
; RV64-NEXT:    srli a0, a0, 54
; RV64-NEXT:    or a0, a0, a1
; RV64-NEXT:    andi a0, a0, 2047
; RV64-NEXT:    sltiu s3, a0, 342
; RV64-NEXT:    li a1, 1463
; RV64-NEXT:    mv a0, s2
; RV64-NEXT:    call __muldi3
; RV64-NEXT:    addi a0, a0, -1463
; RV64-NEXT:    andi a0, a0, 2047
; RV64-NEXT:    sltiu s2, a0, 293
; RV64-NEXT:    li a1, 819
; RV64-NEXT:    mv a0, s1
; RV64-NEXT:    call __muldi3
; RV64-NEXT:    addi a0, a0, -1638
; RV64-NEXT:    addi s3, s3, -1
; RV64-NEXT:    addi s2, s2, -1
; RV64-NEXT:    andi a0, a0, 2047
; RV64-NEXT:    andi a1, s3, 2047
; RV64-NEXT:    andi a2, s2, 2047
; RV64-NEXT:    sltiu a0, a0, 2
; RV64-NEXT:    slli a2, a2, 11
; RV64-NEXT:    addi a0, a0, -1
; RV64-NEXT:    slli a0, a0, 22
; RV64-NEXT:    or a0, a2, a0
; RV64-NEXT:    or a0, a1, a0
; RV64-NEXT:    slli a1, a0, 31
; RV64-NEXT:    srli a1, a1, 63
; RV64-NEXT:    sw a0, 0(s0)
; RV64-NEXT:    sb a1, 4(s0)
; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 48
; RV64-NEXT:    ret
;
; RV32M-LABEL: test_urem_vec:
; RV32M:       # %bb.0:
; RV32M-NEXT:    lbu a1, 4(a0)
; RV32M-NEXT:    lw a2, 0(a0)
; RV32M-NEXT:    li a3, 683
; RV32M-NEXT:    li a4, 819
; RV32M-NEXT:    slli a1, a1, 10
; RV32M-NEXT:    srli a5, a2, 22
; RV32M-NEXT:    or a1, a5, a1
; RV32M-NEXT:    andi a5, a2, 2047
; RV32M-NEXT:    mul a3, a5, a3
; RV32M-NEXT:    li a5, 1463
; RV32M-NEXT:    srli a2, a2, 11
; RV32M-NEXT:    mul a2, a2, a5
; RV32M-NEXT:    slli a5, a3, 10
; RV32M-NEXT:    slli a3, a3, 21
; RV32M-NEXT:    mul a1, a1, a4
; RV32M-NEXT:    addi a2, a2, -1463
; RV32M-NEXT:    srli a3, a3, 22
; RV32M-NEXT:    addi a1, a1, -1638
; RV32M-NEXT:    andi a2, a2, 2047
; RV32M-NEXT:    or a3, a3, a5
; RV32M-NEXT:    andi a1, a1, 2047
; RV32M-NEXT:    sltiu a2, a2, 293
; RV32M-NEXT:    andi a3, a3, 2047
; RV32M-NEXT:    sltiu a1, a1, 2
; RV32M-NEXT:    addi a2, a2, -1
; RV32M-NEXT:    sltiu a3, a3, 342
; RV32M-NEXT:    xori a4, a1, 1
; RV32M-NEXT:    addi a1, a1, -1
; RV32M-NEXT:    andi a2, a2, 2047
; RV32M-NEXT:    addi a3, a3, -1
; RV32M-NEXT:    slli a2, a2, 11
; RV32M-NEXT:    slli a1, a1, 22
; RV32M-NEXT:    andi a3, a3, 2047
; RV32M-NEXT:    or a1, a2, a1
; RV32M-NEXT:    or a1, a3, a1
; RV32M-NEXT:    sw a1, 0(a0)
; RV32M-NEXT:    sb a4, 4(a0)
; RV32M-NEXT:    ret
;
; RV64M-LABEL: test_urem_vec:
; RV64M:       # %bb.0:
; RV64M-NEXT:    lbu a1, 4(a0)
; RV64M-NEXT:    lwu a2, 0(a0)
; RV64M-NEXT:    li a3, 683
; RV64M-NEXT:    li a4, 1463
; RV64M-NEXT:    slli a1, a1, 32
; RV64M-NEXT:    or a1, a2, a1
; RV64M-NEXT:    andi a2, a1, 2047
; RV64M-NEXT:    mul a2, a2, a3
; RV64M-NEXT:    srli a3, a1, 11
; RV64M-NEXT:    mul a3, a3, a4
; RV64M-NEXT:    li a4, 819
; RV64M-NEXT:    srli a1, a1, 22
; RV64M-NEXT:    mul a1, a1, a4
; RV64M-NEXT:    slli a4, a2, 10
; RV64M-NEXT:    slli a2, a2, 53
; RV64M-NEXT:    addi a3, a3, -1463
; RV64M-NEXT:    addi a1, a1, -1638
; RV64M-NEXT:    srli a2, a2, 54
; RV64M-NEXT:    andi a3, a3, 2047
; RV64M-NEXT:    andi a1, a1, 2047
; RV64M-NEXT:    or a2, a2, a4
; RV64M-NEXT:    sltiu a3, a3, 293
; RV64M-NEXT:    sltiu a1, a1, 2
; RV64M-NEXT:    andi a2, a2, 2047
; RV64M-NEXT:    addi a1, a1, -1
; RV64M-NEXT:    addi a3, a3, -1
; RV64M-NEXT:    sltiu a2, a2, 342
; RV64M-NEXT:    andi a3, a3, 2047
; RV64M-NEXT:    slli a1, a1, 22
; RV64M-NEXT:    addi a2, a2, -1
; RV64M-NEXT:    slli a3, a3, 11
; RV64M-NEXT:    andi a2, a2, 2047
; RV64M-NEXT:    or a1, a3, a1
; RV64M-NEXT:    or a1, a2, a1
; RV64M-NEXT:    slli a2, a1, 31
; RV64M-NEXT:    srli a2, a2, 63
; RV64M-NEXT:    sw a1, 0(a0)
; RV64M-NEXT:    sb a2, 4(a0)
; RV64M-NEXT:    ret
;
; RV32MV-LABEL: test_urem_vec:
; RV32MV:       # %bb.0:
; RV32MV-NEXT:    lw a1, 0(a0)
; RV32MV-NEXT:    lbu a2, 4(a0)
; RV32MV-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV32MV-NEXT:    vid.v v8
; RV32MV-NEXT:    lui a3, %hi(.LCPI4_0)
; RV32MV-NEXT:    addi a3, a3, %lo(.LCPI4_0)
; RV32MV-NEXT:    vle16.v v9, (a3)
; RV32MV-NEXT:    andi a3, a1, 2047
; RV32MV-NEXT:    slli a2, a2, 10
; RV32MV-NEXT:    vmv.v.x v10, a3
; RV32MV-NEXT:    srli a3, a1, 22
; RV32MV-NEXT:    or a2, a3, a2
; RV32MV-NEXT:    lui a3, 41121
; RV32MV-NEXT:    slli a1, a1, 10
; RV32MV-NEXT:    srli a1, a1, 21
; RV32MV-NEXT:    vslide1down.vx v10, v10, a1
; RV32MV-NEXT:    li a1, 2047
; RV32MV-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV32MV-NEXT:    vmv.v.i v11, 1
; RV32MV-NEXT:    andi a2, a2, 2047
; RV32MV-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; RV32MV-NEXT:    vslide1down.vx v10, v10, a2
; RV32MV-NEXT:    lui a2, %hi(.LCPI4_1)
; RV32MV-NEXT:    addi a2, a2, %lo(.LCPI4_1)
; RV32MV-NEXT:    addi a3, a3, -1527
; RV32MV-NEXT:    vsext.vf2 v12, v11
; RV32MV-NEXT:    vslidedown.vi v10, v10, 1
; RV32MV-NEXT:    vsub.vv v8, v10, v8
; RV32MV-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV32MV-NEXT:    vmv.s.x v10, a3
; RV32MV-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; RV32MV-NEXT:    vsext.vf2 v11, v10
; RV32MV-NEXT:    vmul.vv v8, v8, v9
; RV32MV-NEXT:    vadd.vv v9, v8, v8
; RV32MV-NEXT:    vsll.vv v9, v9, v11
; RV32MV-NEXT:    vle16.v v10, (a2)
; RV32MV-NEXT:    vand.vx v8, v8, a1
; RV32MV-NEXT:    vsrl.vv v8, v8, v12
; RV32MV-NEXT:    vor.vv v8, v8, v9
; RV32MV-NEXT:    vand.vx v8, v8, a1
; RV32MV-NEXT:    vmsltu.vv v0, v10, v8
; RV32MV-NEXT:    vmv.v.i v8, 0
; RV32MV-NEXT:    vmerge.vim v8, v8, -1, v0
; RV32MV-NEXT:    vslidedown.vi v9, v8, 2
; RV32MV-NEXT:    vmv.x.s a1, v8
; RV32MV-NEXT:    vslidedown.vi v8, v8, 1
; RV32MV-NEXT:    vmv.x.s a2, v9
; RV32MV-NEXT:    andi a1, a1, 2047
; RV32MV-NEXT:    slli a3, a2, 22
; RV32MV-NEXT:    or a1, a1, a3
; RV32MV-NEXT:    vmv.x.s a3, v8
; RV32MV-NEXT:    slli a2, a2, 21
; RV32MV-NEXT:    andi a3, a3, 2047
; RV32MV-NEXT:    srli a2, a2, 31
; RV32MV-NEXT:    slli a3, a3, 11
; RV32MV-NEXT:    or a1, a1, a3
; RV32MV-NEXT:    sw a1, 0(a0)
; RV32MV-NEXT:    sb a2, 4(a0)
; RV32MV-NEXT:    ret
;
; RV64MV-LABEL: test_urem_vec:
; RV64MV:       # %bb.0:
; RV64MV-NEXT:    lwu a1, 0(a0)
; RV64MV-NEXT:    lbu a2, 4(a0)
; RV64MV-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV64MV-NEXT:    vid.v v8
; RV64MV-NEXT:    lui a3, %hi(.LCPI4_0)
; RV64MV-NEXT:    addi a3, a3, %lo(.LCPI4_0)
; RV64MV-NEXT:    vle16.v v9, (a3)
; RV64MV-NEXT:    lui a3, 41121
; RV64MV-NEXT:    slli a2, a2, 32
; RV64MV-NEXT:    or a1, a1, a2
; RV64MV-NEXT:    andi a2, a1, 2047
; RV64MV-NEXT:    vmv.v.x v10, a2
; RV64MV-NEXT:    slli a2, a1, 42
; RV64MV-NEXT:    srli a2, a2, 53
; RV64MV-NEXT:    vslide1down.vx v10, v10, a2
; RV64MV-NEXT:    li a2, 2047
; RV64MV-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV64MV-NEXT:    vmv.v.i v11, 1
; RV64MV-NEXT:    srli a1, a1, 22
; RV64MV-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; RV64MV-NEXT:    vslide1down.vx v10, v10, a1
; RV64MV-NEXT:    lui a1, %hi(.LCPI4_1)
; RV64MV-NEXT:    addi a1, a1, %lo(.LCPI4_1)
; RV64MV-NEXT:    addi a3, a3, -1527
; RV64MV-NEXT:    vsext.vf2 v12, v11
; RV64MV-NEXT:    vslidedown.vi v10, v10, 1
; RV64MV-NEXT:    vsub.vv v8, v10, v8
; RV64MV-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV64MV-NEXT:    vmv.s.x v10, a3
; RV64MV-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; RV64MV-NEXT:    vsext.vf2 v11, v10
; RV64MV-NEXT:    vmul.vv v8, v8, v9
; RV64MV-NEXT:    vadd.vv v9, v8, v8
; RV64MV-NEXT:    vsll.vv v9, v9, v11
; RV64MV-NEXT:    vle16.v v10, (a1)
; RV64MV-NEXT:    vand.vx v8, v8, a2
; RV64MV-NEXT:    vsrl.vv v8, v8, v12
; RV64MV-NEXT:    vor.vv v8, v8, v9
; RV64MV-NEXT:    vand.vx v8, v8, a2
; RV64MV-NEXT:    vmsltu.vv v0, v10, v8
; RV64MV-NEXT:    vmv.v.i v8, 0
; RV64MV-NEXT:    vmerge.vim v8, v8, -1, v0
; RV64MV-NEXT:    vmv.x.s a1, v8
; RV64MV-NEXT:    vslidedown.vi v9, v8, 1
; RV64MV-NEXT:    vslidedown.vi v8, v8, 2
; RV64MV-NEXT:    andi a1, a1, 2047
; RV64MV-NEXT:    vmv.x.s a2, v9
; RV64MV-NEXT:    vmv.x.s a3, v8
; RV64MV-NEXT:    andi a2, a2, 2047
; RV64MV-NEXT:    slli a3, a3, 22
; RV64MV-NEXT:    slli a2, a2, 11
; RV64MV-NEXT:    or a1, a1, a3
; RV64MV-NEXT:    or a1, a1, a2
; RV64MV-NEXT:    slli a2, a1, 31
; RV64MV-NEXT:    srli a2, a2, 63
; RV64MV-NEXT:    sw a1, 0(a0)
; RV64MV-NEXT:    sb a2, 4(a0)
; RV64MV-NEXT:    ret
  %ld = load <3 x i11>, ptr %X
  %urem = urem <3 x i11> %ld, <i11 6, i11 7, i11 -5>
  %cmp = icmp ne <3 x i11> %urem, <i11 0, i11 1, i11 2>
  %ext = sext <3 x i1> %cmp to <3 x i11>
  store <3 x i11> %ext, ptr %X
  ret void
}