; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s --check-prefix=RV64

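; Test that a urem-by-constant equality/inequality check on scalable i16
; vectors is lowered without a division instruction: the value is multiplied
; by the multiplicative inverse (mod 2^16) of the divisor's odd part and
; compared against an unsigned threshold, with an extra rotate-right for even
; divisors.

; x % 6 != 0: multiply by 0xAAAB (the inverse of 3 mod 2^16), rotate right by
; one (vsll/vsrl/vor), and check whether the result exceeds 0x2AAA
; (= 0xFFFF / 6).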
define <vscale x 1 x i16> @test_urem_vec_even_divisor_eq0(<vscale x 1 x i16> %x) nounwind {
; RV32-LABEL: test_urem_vec_even_divisor_eq0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 1048571
; RV32-NEXT:    addi a0, a0, -1365
; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV32-NEXT:    vmul.vx v8, v8, a0
; RV32-NEXT:    lui a0, 3
; RV32-NEXT:    addi a0, a0, -1366
; RV32-NEXT:    vsll.vi v9, v8, 15
; RV32-NEXT:    vsrl.vi v8, v8, 1
; RV32-NEXT:    vor.vv v8, v8, v9
; RV32-NEXT:    vmsgtu.vx v0, v8, a0
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    vmerge.vim v8, v8, -1, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_vec_even_divisor_eq0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 1048571
; RV64-NEXT:    addi a0, a0, -1365
; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    lui a0, 3
; RV64-NEXT:    addi a0, a0, -1366
; RV64-NEXT:    vsll.vi v9, v8, 15
; RV64-NEXT:    vsrl.vi v8, v8, 1
; RV64-NEXT:    vor.vv v8, v8, v9
; RV64-NEXT:    vmsgtu.vx v0, v8, a0
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
; RV64-NEXT:    ret
  %urem = urem <vscale x 1 x i16> %x, splat (i16 6)
  %cmp = icmp ne <vscale x 1 x i16> %urem, splat (i16 0)
  %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %ext
}

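; x % 5 != 0: an odd divisor needs no rotate; multiply by 0xCCCD (the inverse
; of 5 mod 2^16) and check whether the result exceeds 0x3333 (= 0xFFFF / 5).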
define <vscale x 1 x i16> @test_urem_vec_odd_divisor_eq0(<vscale x 1 x i16> %x) nounwind {
; RV32-LABEL: test_urem_vec_odd_divisor_eq0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 1048573
; RV32-NEXT:    addi a0, a0, -819
; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV32-NEXT:    vmul.vx v8, v8, a0
; RV32-NEXT:    lui a0, 3
; RV32-NEXT:    addi a0, a0, 819
; RV32-NEXT:    vmsgtu.vx v0, v8, a0
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    vmerge.vim v8, v8, -1, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_vec_odd_divisor_eq0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 1048573
; RV64-NEXT:    addi a0, a0, -819
; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    lui a0, 3
; RV64-NEXT:    addi a0, a0, 819
; RV64-NEXT:    vmsgtu.vx v0, v8, a0
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
; RV64-NEXT:    ret
  %urem = urem <vscale x 1 x i16> %x, splat (i16 5)
  %cmp = icmp ne <vscale x 1 x i16> %urem, splat (i16 0)
  %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %ext
}

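; x % 6 != 1: the compared-against remainder is folded into a subtract of 1
; first, followed by the same multiply/rotate/compare sequence and the same
; 0x2AAA threshold as the eq0 case.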
define <vscale x 1 x i16> @test_urem_vec_even_divisor_eq1(<vscale x 1 x i16> %x) nounwind {
; RV32-LABEL: test_urem_vec_even_divisor_eq1:
; RV32:       # %bb.0:
; RV32-NEXT:    li a0, 1
; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV32-NEXT:    vsub.vx v8, v8, a0
; RV32-NEXT:    lui a0, 1048571
; RV32-NEXT:    addi a0, a0, -1365
; RV32-NEXT:    vmul.vx v8, v8, a0
; RV32-NEXT:    lui a0, 3
; RV32-NEXT:    addi a0, a0, -1366
; RV32-NEXT:    vsll.vi v9, v8, 15
; RV32-NEXT:    vsrl.vi v8, v8, 1
; RV32-NEXT:    vor.vv v8, v8, v9
; RV32-NEXT:    vmsgtu.vx v0, v8, a0
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    vmerge.vim v8, v8, -1, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_vec_even_divisor_eq1:
; RV64:       # %bb.0:
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0
; RV64-NEXT:    lui a0, 1048571
; RV64-NEXT:    addi a0, a0, -1365
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    lui a0, 3
; RV64-NEXT:    addi a0, a0, -1366
; RV64-NEXT:    vsll.vi v9, v8, 15
; RV64-NEXT:    vsrl.vi v8, v8, 1
; RV64-NEXT:    vor.vv v8, v8, v9
; RV64-NEXT:    vmsgtu.vx v0, v8, a0
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
; RV64-NEXT:    ret
  %urem = urem <vscale x 1 x i16> %x, splat (i16 6)
  %cmp = icmp ne <vscale x 1 x i16> %urem, splat (i16 1)
  %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %ext
}

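; x % 5 != 1: subtract 1, then multiply by the inverse of 5; the threshold
; drops to 0x3332 so that x = 0, whose wrapped x - 1 is also a multiple of 5,
; is still rejected.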
define <vscale x 1 x i16> @test_urem_vec_odd_divisor_eq1(<vscale x 1 x i16> %x) nounwind {
; RV32-LABEL: test_urem_vec_odd_divisor_eq1:
; RV32:       # %bb.0:
; RV32-NEXT:    li a0, 1
; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV32-NEXT:    vsub.vx v8, v8, a0
; RV32-NEXT:    lui a0, 1048573
; RV32-NEXT:    addi a0, a0, -819
; RV32-NEXT:    vmul.vx v8, v8, a0
; RV32-NEXT:    lui a0, 3
; RV32-NEXT:    addi a0, a0, 818
; RV32-NEXT:    vmsgtu.vx v0, v8, a0
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    vmerge.vim v8, v8, -1, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_vec_odd_divisor_eq1:
; RV64:       # %bb.0:
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0
; RV64-NEXT:    lui a0, 1048573
; RV64-NEXT:    addi a0, a0, -819
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    lui a0, 3
; RV64-NEXT:    addi a0, a0, 818
; RV64-NEXT:    vmsgtu.vx v0, v8, a0
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
; RV64-NEXT:    ret
  %urem = urem <vscale x 1 x i16> %x, splat (i16 5)
  %cmp = icmp ne <vscale x 1 x i16> %urem, splat (i16 1)
  %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %ext
}
