; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s

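; Check that udiv on 128-bit vector types selects the unsigned LSX division
; instructions vdiv.{bu,hu,wu,du}, and that udiv by a power-of-two splat
; constant is folded to a logical right shift (vsrli) instead.
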
define void @udiv_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: udiv_v16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vdiv.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %v2 = udiv <16 x i8> %v0, %v1
  store <16 x i8> %v2, ptr %res
  ret void
}

define void @udiv_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: udiv_v8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vdiv.hu $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %v2 = udiv <8 x i16> %v0, %v1
  store <8 x i16> %v2, ptr %res
  ret void
}

define void @udiv_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: udiv_v4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vdiv.wu $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %v2 = udiv <4 x i32> %v0, %v1
  store <4 x i32> %v2, ptr %res
  ret void
}

define void @udiv_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: udiv_v2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vdiv.du $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %v2 = udiv <2 x i64> %v0, %v1
  store <2 x i64> %v2, ptr %res
  ret void
}

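; The splat divisor 8 is a power of two, so the tests below expect a logical
; right shift by 3 (vsrli) rather than a division instruction.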
define void @udiv_v16i8_8(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: udiv_v16i8_8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vsrli.b $vr0, $vr0, 3
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i8>, ptr %a0
  %v1 = udiv <16 x i8> %v0, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
  store <16 x i8> %v1, ptr %res
  ret void
}

define void @udiv_v8i16_8(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: udiv_v8i16_8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vsrli.h $vr0, $vr0, 3
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <8 x i16>, ptr %a0
  %v1 = udiv <8 x i16> %v0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  store <8 x i16> %v1, ptr %res
  ret void
}

define void @udiv_v4i32_8(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: udiv_v4i32_8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vsrli.w $vr0, $vr0, 3
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <4 x i32>, ptr %a0
  %v1 = udiv <4 x i32> %v0, <i32 8, i32 8, i32 8, i32 8>
  store <4 x i32> %v1, ptr %res
  ret void
}

define void @udiv_v2i64_8(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: udiv_v2i64_8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vsrli.d $vr0, $vr0, 3
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <2 x i64>, ptr %a0
  %v1 = udiv <2 x i64> %v0, <i64 8, i64 8>
  store <2 x i64> %v1, ptr %res
  ret void
}