xref: /llvm-project/clang/test/CodeGen/LoongArch/lsx/builtin-alias.c (revision 0e01c72c5645259d9a08a1a7ed39cb5cc41ce311)
1673c5308Schenli // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2673c5308Schenli // RUN: %clang_cc1 -triple loongarch64 -target-feature +lsx -O2 -emit-llvm %s -o - | FileCheck %s
3673c5308Schenli 
4673c5308Schenli #include <lsxintrin.h>
5673c5308Schenli 
6673c5308Schenli // CHECK-LABEL: @vsll_b(
7673c5308Schenli // CHECK-NEXT:  entry:
8*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
9*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
10*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsll.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
11*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
12*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
13673c5308Schenli //
vsll_b(v16i8 _1,v16i8 _2)14673c5308Schenli v16i8 vsll_b(v16i8 _1, v16i8 _2) { return __lsx_vsll_b(_1, _2); }
15673c5308Schenli // CHECK-LABEL: @vsll_h(
16673c5308Schenli // CHECK-NEXT:  entry:
17*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
18*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
19*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsll.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
20*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
21*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
22673c5308Schenli //
vsll_h(v8i16 _1,v8i16 _2)23673c5308Schenli v8i16 vsll_h(v8i16 _1, v8i16 _2) { return __lsx_vsll_h(_1, _2); }
24673c5308Schenli // CHECK-LABEL: @vsll_w(
25673c5308Schenli // CHECK-NEXT:  entry:
26*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
27*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
28*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsll.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
29*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
30*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
31673c5308Schenli //
vsll_w(v4i32 _1,v4i32 _2)32673c5308Schenli v4i32 vsll_w(v4i32 _1, v4i32 _2) { return __lsx_vsll_w(_1, _2); }
33673c5308Schenli // CHECK-LABEL: @vsll_d(
34673c5308Schenli // CHECK-NEXT:  entry:
35*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
36*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
37*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsll.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
38*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
39*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
40673c5308Schenli //
vsll_d(v2i64 _1,v2i64 _2)41673c5308Schenli v2i64 vsll_d(v2i64 _1, v2i64 _2) { return __lsx_vsll_d(_1, _2); }
42673c5308Schenli // CHECK-LABEL: @vslli_b(
43673c5308Schenli // CHECK-NEXT:  entry:
44*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
45*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslli.b(<16 x i8> [[TMP0]], i32 1)
46*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
47*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
48673c5308Schenli //
vslli_b(v16i8 _1)49673c5308Schenli v16i8 vslli_b(v16i8 _1) { return __lsx_vslli_b(_1, 1); }
50673c5308Schenli // CHECK-LABEL: @vslli_h(
51673c5308Schenli // CHECK-NEXT:  entry:
52*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
53*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslli.h(<8 x i16> [[TMP0]], i32 1)
54*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
55*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
56673c5308Schenli //
vslli_h(v8i16 _1)57673c5308Schenli v8i16 vslli_h(v8i16 _1) { return __lsx_vslli_h(_1, 1); }
58673c5308Schenli // CHECK-LABEL: @vslli_w(
59673c5308Schenli // CHECK-NEXT:  entry:
60*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
61*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslli.w(<4 x i32> [[TMP0]], i32 1)
62*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
63*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
64673c5308Schenli //
vslli_w(v4i32 _1)65673c5308Schenli v4i32 vslli_w(v4i32 _1) { return __lsx_vslli_w(_1, 1); }
66673c5308Schenli // CHECK-LABEL: @vslli_d(
67673c5308Schenli // CHECK-NEXT:  entry:
68*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
69*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslli.d(<2 x i64> [[TMP0]], i32 1)
70*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
71*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
72673c5308Schenli //
vslli_d(v2i64 _1)73673c5308Schenli v2i64 vslli_d(v2i64 _1) { return __lsx_vslli_d(_1, 1); }
74673c5308Schenli // CHECK-LABEL: @vsra_b(
75673c5308Schenli // CHECK-NEXT:  entry:
76*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
77*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
78*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsra.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
79*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
80*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
81673c5308Schenli //
vsra_b(v16i8 _1,v16i8 _2)82673c5308Schenli v16i8 vsra_b(v16i8 _1, v16i8 _2) { return __lsx_vsra_b(_1, _2); }
83673c5308Schenli // CHECK-LABEL: @vsra_h(
84673c5308Schenli // CHECK-NEXT:  entry:
85*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
86*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
87*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsra.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
88*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
89*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
90673c5308Schenli //
vsra_h(v8i16 _1,v8i16 _2)91673c5308Schenli v8i16 vsra_h(v8i16 _1, v8i16 _2) { return __lsx_vsra_h(_1, _2); }
92673c5308Schenli // CHECK-LABEL: @vsra_w(
93673c5308Schenli // CHECK-NEXT:  entry:
94*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
95*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
96*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsra.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
97*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
98*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
99673c5308Schenli //
vsra_w(v4i32 _1,v4i32 _2)100673c5308Schenli v4i32 vsra_w(v4i32 _1, v4i32 _2) { return __lsx_vsra_w(_1, _2); }
101673c5308Schenli // CHECK-LABEL: @vsra_d(
102673c5308Schenli // CHECK-NEXT:  entry:
103*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
104*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
105*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsra.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
106*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
107*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
108673c5308Schenli //
vsra_d(v2i64 _1,v2i64 _2)109673c5308Schenli v2i64 vsra_d(v2i64 _1, v2i64 _2) { return __lsx_vsra_d(_1, _2); }
110673c5308Schenli // CHECK-LABEL: @vsrai_b(
111673c5308Schenli // CHECK-NEXT:  entry:
112*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
113*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrai.b(<16 x i8> [[TMP0]], i32 1)
114*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
115*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
116673c5308Schenli //
vsrai_b(v16i8 _1)117673c5308Schenli v16i8 vsrai_b(v16i8 _1) { return __lsx_vsrai_b(_1, 1); }
118673c5308Schenli // CHECK-LABEL: @vsrai_h(
119673c5308Schenli // CHECK-NEXT:  entry:
120*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
121*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrai.h(<8 x i16> [[TMP0]], i32 1)
122*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
123*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
124673c5308Schenli //
vsrai_h(v8i16 _1)125673c5308Schenli v8i16 vsrai_h(v8i16 _1) { return __lsx_vsrai_h(_1, 1); }
126673c5308Schenli // CHECK-LABEL: @vsrai_w(
127673c5308Schenli // CHECK-NEXT:  entry:
128*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
129*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrai.w(<4 x i32> [[TMP0]], i32 1)
130*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
131*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
132673c5308Schenli //
vsrai_w(v4i32 _1)133673c5308Schenli v4i32 vsrai_w(v4i32 _1) { return __lsx_vsrai_w(_1, 1); }
134673c5308Schenli // CHECK-LABEL: @vsrai_d(
135673c5308Schenli // CHECK-NEXT:  entry:
136*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
137*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrai.d(<2 x i64> [[TMP0]], i32 1)
138*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
139*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
140673c5308Schenli //
vsrai_d(v2i64 _1)141673c5308Schenli v2i64 vsrai_d(v2i64 _1) { return __lsx_vsrai_d(_1, 1); }
142673c5308Schenli // CHECK-LABEL: @vsrar_b(
143673c5308Schenli // CHECK-NEXT:  entry:
144*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
145*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
146*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrar.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
147*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
148*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
149673c5308Schenli //
vsrar_b(v16i8 _1,v16i8 _2)150673c5308Schenli v16i8 vsrar_b(v16i8 _1, v16i8 _2) { return __lsx_vsrar_b(_1, _2); }
151673c5308Schenli // CHECK-LABEL: @vsrar_h(
152673c5308Schenli // CHECK-NEXT:  entry:
153*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
154*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
155*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrar.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
156*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
157*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
158673c5308Schenli //
vsrar_h(v8i16 _1,v8i16 _2)159673c5308Schenli v8i16 vsrar_h(v8i16 _1, v8i16 _2) { return __lsx_vsrar_h(_1, _2); }
160673c5308Schenli // CHECK-LABEL: @vsrar_w(
161673c5308Schenli // CHECK-NEXT:  entry:
162*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
163*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
164*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrar.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
165*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
166*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
167673c5308Schenli //
vsrar_w(v4i32 _1,v4i32 _2)168673c5308Schenli v4i32 vsrar_w(v4i32 _1, v4i32 _2) { return __lsx_vsrar_w(_1, _2); }
169673c5308Schenli // CHECK-LABEL: @vsrar_d(
170673c5308Schenli // CHECK-NEXT:  entry:
171*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
172*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
173*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrar.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
174*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
175*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
176673c5308Schenli //
vsrar_d(v2i64 _1,v2i64 _2)177673c5308Schenli v2i64 vsrar_d(v2i64 _1, v2i64 _2) { return __lsx_vsrar_d(_1, _2); }
178673c5308Schenli // CHECK-LABEL: @vsrari_b(
179673c5308Schenli // CHECK-NEXT:  entry:
180*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
181*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrari.b(<16 x i8> [[TMP0]], i32 1)
182*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
183*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
184673c5308Schenli //
vsrari_b(v16i8 _1)185673c5308Schenli v16i8 vsrari_b(v16i8 _1) { return __lsx_vsrari_b(_1, 1); }
186673c5308Schenli // CHECK-LABEL: @vsrari_h(
187673c5308Schenli // CHECK-NEXT:  entry:
188*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
189*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrari.h(<8 x i16> [[TMP0]], i32 1)
190*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
191*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
192673c5308Schenli //
vsrari_h(v8i16 _1)193673c5308Schenli v8i16 vsrari_h(v8i16 _1) { return __lsx_vsrari_h(_1, 1); }
194673c5308Schenli // CHECK-LABEL: @vsrari_w(
195673c5308Schenli // CHECK-NEXT:  entry:
196*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
197*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrari.w(<4 x i32> [[TMP0]], i32 1)
198*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
199*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
200673c5308Schenli //
vsrari_w(v4i32 _1)201673c5308Schenli v4i32 vsrari_w(v4i32 _1) { return __lsx_vsrari_w(_1, 1); }
202673c5308Schenli // CHECK-LABEL: @vsrari_d(
203673c5308Schenli // CHECK-NEXT:  entry:
204*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
205*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrari.d(<2 x i64> [[TMP0]], i32 1)
206*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
207*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
208673c5308Schenli //
vsrari_d(v2i64 _1)209673c5308Schenli v2i64 vsrari_d(v2i64 _1) { return __lsx_vsrari_d(_1, 1); }
210673c5308Schenli // CHECK-LABEL: @vsrl_b(
211673c5308Schenli // CHECK-NEXT:  entry:
212*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
213*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
214*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrl.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
215*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
216*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
217673c5308Schenli //
vsrl_b(v16i8 _1,v16i8 _2)218673c5308Schenli v16i8 vsrl_b(v16i8 _1, v16i8 _2) { return __lsx_vsrl_b(_1, _2); }
219673c5308Schenli // CHECK-LABEL: @vsrl_h(
220673c5308Schenli // CHECK-NEXT:  entry:
221*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
222*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
223*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrl.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
224*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
225*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
226673c5308Schenli //
vsrl_h(v8i16 _1,v8i16 _2)227673c5308Schenli v8i16 vsrl_h(v8i16 _1, v8i16 _2) { return __lsx_vsrl_h(_1, _2); }
228673c5308Schenli // CHECK-LABEL: @vsrl_w(
229673c5308Schenli // CHECK-NEXT:  entry:
230*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
231*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
232*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrl.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
233*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
234*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
235673c5308Schenli //
vsrl_w(v4i32 _1,v4i32 _2)236673c5308Schenli v4i32 vsrl_w(v4i32 _1, v4i32 _2) { return __lsx_vsrl_w(_1, _2); }
237673c5308Schenli // CHECK-LABEL: @vsrl_d(
238673c5308Schenli // CHECK-NEXT:  entry:
239*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
240*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
241*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrl.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
242*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
243*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
244673c5308Schenli //
vsrl_d(v2i64 _1,v2i64 _2)245673c5308Schenli v2i64 vsrl_d(v2i64 _1, v2i64 _2) { return __lsx_vsrl_d(_1, _2); }
246673c5308Schenli // CHECK-LABEL: @vsrli_b(
247673c5308Schenli // CHECK-NEXT:  entry:
248*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
249*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrli.b(<16 x i8> [[TMP0]], i32 1)
250*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
251*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
252673c5308Schenli //
vsrli_b(v16i8 _1)253673c5308Schenli v16i8 vsrli_b(v16i8 _1) { return __lsx_vsrli_b(_1, 1); }
254673c5308Schenli // CHECK-LABEL: @vsrli_h(
255673c5308Schenli // CHECK-NEXT:  entry:
256*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
257*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrli.h(<8 x i16> [[TMP0]], i32 1)
258*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
259*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
260673c5308Schenli //
vsrli_h(v8i16 _1)261673c5308Schenli v8i16 vsrli_h(v8i16 _1) { return __lsx_vsrli_h(_1, 1); }
262673c5308Schenli // CHECK-LABEL: @vsrli_w(
263673c5308Schenli // CHECK-NEXT:  entry:
264*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
265*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrli.w(<4 x i32> [[TMP0]], i32 1)
266*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
267*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
268673c5308Schenli //
vsrli_w(v4i32 _1)269673c5308Schenli v4i32 vsrli_w(v4i32 _1) { return __lsx_vsrli_w(_1, 1); }
270673c5308Schenli // CHECK-LABEL: @vsrli_d(
271673c5308Schenli // CHECK-NEXT:  entry:
272*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
273*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrli.d(<2 x i64> [[TMP0]], i32 1)
274*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
275*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
276673c5308Schenli //
vsrli_d(v2i64 _1)277673c5308Schenli v2i64 vsrli_d(v2i64 _1) { return __lsx_vsrli_d(_1, 1); }
278673c5308Schenli // CHECK-LABEL: @vsrlr_b(
279673c5308Schenli // CHECK-NEXT:  entry:
280*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
281*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
282*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrlr.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
283*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
284*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
285673c5308Schenli //
vsrlr_b(v16i8 _1,v16i8 _2)286673c5308Schenli v16i8 vsrlr_b(v16i8 _1, v16i8 _2) { return __lsx_vsrlr_b(_1, _2); }
287673c5308Schenli // CHECK-LABEL: @vsrlr_h(
288673c5308Schenli // CHECK-NEXT:  entry:
289*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
290*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
291*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrlr.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
292*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
293*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
294673c5308Schenli //
vsrlr_h(v8i16 _1,v8i16 _2)295673c5308Schenli v8i16 vsrlr_h(v8i16 _1, v8i16 _2) { return __lsx_vsrlr_h(_1, _2); }
296673c5308Schenli // CHECK-LABEL: @vsrlr_w(
297673c5308Schenli // CHECK-NEXT:  entry:
298*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
299*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
300*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrlr.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
301*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
302*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
303673c5308Schenli //
vsrlr_w(v4i32 _1,v4i32 _2)304673c5308Schenli v4i32 vsrlr_w(v4i32 _1, v4i32 _2) { return __lsx_vsrlr_w(_1, _2); }
305673c5308Schenli // CHECK-LABEL: @vsrlr_d(
306673c5308Schenli // CHECK-NEXT:  entry:
307*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
308*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
309*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrlr.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
310*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
311*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
312673c5308Schenli //
vsrlr_d(v2i64 _1,v2i64 _2)313673c5308Schenli v2i64 vsrlr_d(v2i64 _1, v2i64 _2) { return __lsx_vsrlr_d(_1, _2); }
314673c5308Schenli // CHECK-LABEL: @vsrlri_b(
315673c5308Schenli // CHECK-NEXT:  entry:
316*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
317*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrlri.b(<16 x i8> [[TMP0]], i32 1)
318*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
319*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
320673c5308Schenli //
vsrlri_b(v16i8 _1)321673c5308Schenli v16i8 vsrlri_b(v16i8 _1) { return __lsx_vsrlri_b(_1, 1); }
322673c5308Schenli // CHECK-LABEL: @vsrlri_h(
323673c5308Schenli // CHECK-NEXT:  entry:
324*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
325*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrlri.h(<8 x i16> [[TMP0]], i32 1)
326*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
327*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
328673c5308Schenli //
vsrlri_h(v8i16 _1)329673c5308Schenli v8i16 vsrlri_h(v8i16 _1) { return __lsx_vsrlri_h(_1, 1); }
330673c5308Schenli // CHECK-LABEL: @vsrlri_w(
331673c5308Schenli // CHECK-NEXT:  entry:
332*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
333*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrlri.w(<4 x i32> [[TMP0]], i32 1)
334*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
335*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
336673c5308Schenli //
vsrlri_w(v4i32 _1)337673c5308Schenli v4i32 vsrlri_w(v4i32 _1) { return __lsx_vsrlri_w(_1, 1); }
338673c5308Schenli // CHECK-LABEL: @vsrlri_d(
339673c5308Schenli // CHECK-NEXT:  entry:
340*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
341*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrlri.d(<2 x i64> [[TMP0]], i32 1)
342*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
343*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
344673c5308Schenli //
vsrlri_d(v2i64 _1)345673c5308Schenli v2i64 vsrlri_d(v2i64 _1) { return __lsx_vsrlri_d(_1, 1); }
346673c5308Schenli // CHECK-LABEL: @vbitclr_b(
347673c5308Schenli // CHECK-NEXT:  entry:
348*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
349*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
350*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitclr.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
351*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
352*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
353673c5308Schenli //
vbitclr_b(v16u8 _1,v16u8 _2)354673c5308Schenli v16u8 vbitclr_b(v16u8 _1, v16u8 _2) { return __lsx_vbitclr_b(_1, _2); }
355673c5308Schenli // CHECK-LABEL: @vbitclr_h(
356673c5308Schenli // CHECK-NEXT:  entry:
357*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
358*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
359*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vbitclr.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
360*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
361*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
362673c5308Schenli //
vbitclr_h(v8u16 _1,v8u16 _2)363673c5308Schenli v8u16 vbitclr_h(v8u16 _1, v8u16 _2) { return __lsx_vbitclr_h(_1, _2); }
364673c5308Schenli // CHECK-LABEL: @vbitclr_w(
365673c5308Schenli // CHECK-NEXT:  entry:
366*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
367*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
368*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vbitclr.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
369*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
370*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
371673c5308Schenli //
vbitclr_w(v4u32 _1,v4u32 _2)372673c5308Schenli v4u32 vbitclr_w(v4u32 _1, v4u32 _2) { return __lsx_vbitclr_w(_1, _2); }
373673c5308Schenli // CHECK-LABEL: @vbitclr_d(
374673c5308Schenli // CHECK-NEXT:  entry:
375*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
376*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
377*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vbitclr.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
378*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
379*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
380673c5308Schenli //
vbitclr_d(v2u64 _1,v2u64 _2)381673c5308Schenli v2u64 vbitclr_d(v2u64 _1, v2u64 _2) { return __lsx_vbitclr_d(_1, _2); }
382673c5308Schenli // CHECK-LABEL: @vbitclri_b(
383673c5308Schenli // CHECK-NEXT:  entry:
384*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
385*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitclri.b(<16 x i8> [[TMP0]], i32 1)
386*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
387*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
388673c5308Schenli //
vbitclri_b(v16u8 _1)389673c5308Schenli v16u8 vbitclri_b(v16u8 _1) { return __lsx_vbitclri_b(_1, 1); }
390673c5308Schenli // CHECK-LABEL: @vbitclri_h(
391673c5308Schenli // CHECK-NEXT:  entry:
392*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
393*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vbitclri.h(<8 x i16> [[TMP0]], i32 1)
394*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
395*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
396673c5308Schenli //
vbitclri_h(v8u16 _1)397673c5308Schenli v8u16 vbitclri_h(v8u16 _1) { return __lsx_vbitclri_h(_1, 1); }
398673c5308Schenli // CHECK-LABEL: @vbitclri_w(
399673c5308Schenli // CHECK-NEXT:  entry:
400*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
401*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vbitclri.w(<4 x i32> [[TMP0]], i32 1)
402*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
403*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
404673c5308Schenli //
vbitclri_w(v4u32 _1)405673c5308Schenli v4u32 vbitclri_w(v4u32 _1) { return __lsx_vbitclri_w(_1, 1); }
406673c5308Schenli // CHECK-LABEL: @vbitclri_d(
407673c5308Schenli // CHECK-NEXT:  entry:
408*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
409*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vbitclri.d(<2 x i64> [[TMP0]], i32 1)
410*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
411*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
412673c5308Schenli //
vbitclri_d(v2u64 _1)413673c5308Schenli v2u64 vbitclri_d(v2u64 _1) { return __lsx_vbitclri_d(_1, 1); }
414673c5308Schenli // CHECK-LABEL: @vbitset_b(
415673c5308Schenli // CHECK-NEXT:  entry:
416*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
417*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
418*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitset.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
419*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
420*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
421673c5308Schenli //
vbitset_b(v16u8 _1,v16u8 _2)422673c5308Schenli v16u8 vbitset_b(v16u8 _1, v16u8 _2) { return __lsx_vbitset_b(_1, _2); }
423673c5308Schenli // CHECK-LABEL: @vbitset_h(
424673c5308Schenli // CHECK-NEXT:  entry:
425*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
426*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
427*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vbitset.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
428*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
429*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
430673c5308Schenli //
vbitset_h(v8u16 _1,v8u16 _2)431673c5308Schenli v8u16 vbitset_h(v8u16 _1, v8u16 _2) { return __lsx_vbitset_h(_1, _2); }
432673c5308Schenli // CHECK-LABEL: @vbitset_w(
433673c5308Schenli // CHECK-NEXT:  entry:
434*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
435*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
436*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vbitset.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
437*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
438*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
439673c5308Schenli //
vbitset_w(v4u32 _1,v4u32 _2)440673c5308Schenli v4u32 vbitset_w(v4u32 _1, v4u32 _2) { return __lsx_vbitset_w(_1, _2); }
441673c5308Schenli // CHECK-LABEL: @vbitset_d(
442673c5308Schenli // CHECK-NEXT:  entry:
443*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
444*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
445*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vbitset.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
446*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
447*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
448673c5308Schenli //
vbitset_d(v2u64 _1,v2u64 _2)449673c5308Schenli v2u64 vbitset_d(v2u64 _1, v2u64 _2) { return __lsx_vbitset_d(_1, _2); }
450673c5308Schenli // CHECK-LABEL: @vbitseti_b(
451673c5308Schenli // CHECK-NEXT:  entry:
452*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
453*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitseti.b(<16 x i8> [[TMP0]], i32 1)
454*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
455*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
456673c5308Schenli //
vbitseti_b(v16u8 _1)457673c5308Schenli v16u8 vbitseti_b(v16u8 _1) { return __lsx_vbitseti_b(_1, 1); }
458673c5308Schenli // CHECK-LABEL: @vbitseti_h(
459673c5308Schenli // CHECK-NEXT:  entry:
460*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
461*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vbitseti.h(<8 x i16> [[TMP0]], i32 1)
462*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
463*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
464673c5308Schenli //
vbitseti_h(v8u16 _1)465673c5308Schenli v8u16 vbitseti_h(v8u16 _1) { return __lsx_vbitseti_h(_1, 1); }
466673c5308Schenli // CHECK-LABEL: @vbitseti_w(
467673c5308Schenli // CHECK-NEXT:  entry:
468*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
469*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vbitseti.w(<4 x i32> [[TMP0]], i32 1)
470*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
471*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
472673c5308Schenli //
vbitseti_w(v4u32 _1)473673c5308Schenli v4u32 vbitseti_w(v4u32 _1) { return __lsx_vbitseti_w(_1, 1); }
474673c5308Schenli // CHECK-LABEL: @vbitseti_d(
475673c5308Schenli // CHECK-NEXT:  entry:
476*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
477*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vbitseti.d(<2 x i64> [[TMP0]], i32 1)
478*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
479*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
480673c5308Schenli //
vbitseti_d(v2u64 _1)481673c5308Schenli v2u64 vbitseti_d(v2u64 _1) { return __lsx_vbitseti_d(_1, 1); }
482673c5308Schenli // CHECK-LABEL: @vbitrev_b(
483673c5308Schenli // CHECK-NEXT:  entry:
484*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
485*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
486*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitrev.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
487*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
488*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
489673c5308Schenli //
vbitrev_b(v16u8 _1,v16u8 _2)490673c5308Schenli v16u8 vbitrev_b(v16u8 _1, v16u8 _2) { return __lsx_vbitrev_b(_1, _2); }
491673c5308Schenli // CHECK-LABEL: @vbitrev_h(
492673c5308Schenli // CHECK-NEXT:  entry:
493*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
494*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
495*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vbitrev.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
496*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
497*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
498673c5308Schenli //
vbitrev_h(v8u16 _1,v8u16 _2)499673c5308Schenli v8u16 vbitrev_h(v8u16 _1, v8u16 _2) { return __lsx_vbitrev_h(_1, _2); }
500673c5308Schenli // CHECK-LABEL: @vbitrev_w(
501673c5308Schenli // CHECK-NEXT:  entry:
502*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
503*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
504*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vbitrev.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
505*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
506*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
507673c5308Schenli //
vbitrev_w(v4u32 _1,v4u32 _2)508673c5308Schenli v4u32 vbitrev_w(v4u32 _1, v4u32 _2) { return __lsx_vbitrev_w(_1, _2); }
509673c5308Schenli // CHECK-LABEL: @vbitrev_d(
510673c5308Schenli // CHECK-NEXT:  entry:
511*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
512*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
513*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vbitrev.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
514*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
515*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
516673c5308Schenli //
vbitrev_d(v2u64 _1,v2u64 _2)517673c5308Schenli v2u64 vbitrev_d(v2u64 _1, v2u64 _2) { return __lsx_vbitrev_d(_1, _2); }
518673c5308Schenli // CHECK-LABEL: @vbitrevi_b(
519673c5308Schenli // CHECK-NEXT:  entry:
520*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
521*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitrevi.b(<16 x i8> [[TMP0]], i32 1)
522*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
523*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
524673c5308Schenli //
vbitrevi_b(v16u8 _1)525673c5308Schenli v16u8 vbitrevi_b(v16u8 _1) { return __lsx_vbitrevi_b(_1, 1); }
526673c5308Schenli // CHECK-LABEL: @vbitrevi_h(
527673c5308Schenli // CHECK-NEXT:  entry:
528*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
529*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vbitrevi.h(<8 x i16> [[TMP0]], i32 1)
530*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
531*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
532673c5308Schenli //
vbitrevi_h(v8u16 _1)533673c5308Schenli v8u16 vbitrevi_h(v8u16 _1) { return __lsx_vbitrevi_h(_1, 1); }
534673c5308Schenli // CHECK-LABEL: @vbitrevi_w(
535673c5308Schenli // CHECK-NEXT:  entry:
536*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
537*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vbitrevi.w(<4 x i32> [[TMP0]], i32 1)
538*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
539*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
540673c5308Schenli //
vbitrevi_w(v4u32 _1)541673c5308Schenli v4u32 vbitrevi_w(v4u32 _1) { return __lsx_vbitrevi_w(_1, 1); }
542673c5308Schenli // CHECK-LABEL: @vbitrevi_d(
543673c5308Schenli // CHECK-NEXT:  entry:
544*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
545*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vbitrevi.d(<2 x i64> [[TMP0]], i32 1)
546*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
547*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
548673c5308Schenli //
vbitrevi_d(v2u64 _1)549673c5308Schenli v2u64 vbitrevi_d(v2u64 _1) { return __lsx_vbitrevi_d(_1, 1); }
550673c5308Schenli // CHECK-LABEL: @vadd_b(
551673c5308Schenli // CHECK-NEXT:  entry:
552*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
553*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
554*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vadd.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
555*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
556*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
557673c5308Schenli //
vadd_b(v16i8 _1,v16i8 _2)558673c5308Schenli v16i8 vadd_b(v16i8 _1, v16i8 _2) { return __lsx_vadd_b(_1, _2); }
559673c5308Schenli // CHECK-LABEL: @vadd_h(
560673c5308Schenli // CHECK-NEXT:  entry:
561*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
562*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
563*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vadd.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
564*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
565*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
566673c5308Schenli //
vadd_h(v8i16 _1,v8i16 _2)567673c5308Schenli v8i16 vadd_h(v8i16 _1, v8i16 _2) { return __lsx_vadd_h(_1, _2); }
568673c5308Schenli // CHECK-LABEL: @vadd_w(
569673c5308Schenli // CHECK-NEXT:  entry:
570*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
571*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
572*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vadd.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
573*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
574*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
575673c5308Schenli //
vadd_w(v4i32 _1,v4i32 _2)576673c5308Schenli v4i32 vadd_w(v4i32 _1, v4i32 _2) { return __lsx_vadd_w(_1, _2); }
577673c5308Schenli // CHECK-LABEL: @vadd_d(
578673c5308Schenli // CHECK-NEXT:  entry:
579*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
580*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
581*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vadd.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
582*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
583*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
584673c5308Schenli //
vadd_d(v2i64 _1,v2i64 _2)585673c5308Schenli v2i64 vadd_d(v2i64 _1, v2i64 _2) { return __lsx_vadd_d(_1, _2); }
586673c5308Schenli // CHECK-LABEL: @vaddi_bu(
587673c5308Schenli // CHECK-NEXT:  entry:
588*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
589*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vaddi.bu(<16 x i8> [[TMP0]], i32 1)
590*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
591*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
592673c5308Schenli //
vaddi_bu(v16i8 _1)593673c5308Schenli v16i8 vaddi_bu(v16i8 _1) { return __lsx_vaddi_bu(_1, 1); }
594673c5308Schenli // CHECK-LABEL: @vaddi_hu(
595673c5308Schenli // CHECK-NEXT:  entry:
596*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
597*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddi.hu(<8 x i16> [[TMP0]], i32 1)
598*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
599*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
600673c5308Schenli //
vaddi_hu(v8i16 _1)601673c5308Schenli v8i16 vaddi_hu(v8i16 _1) { return __lsx_vaddi_hu(_1, 1); }
602673c5308Schenli // CHECK-LABEL: @vaddi_wu(
603673c5308Schenli // CHECK-NEXT:  entry:
604*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
605*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddi.wu(<4 x i32> [[TMP0]], i32 1)
606*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
607*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
608673c5308Schenli //
vaddi_wu(v4i32 _1)609673c5308Schenli v4i32 vaddi_wu(v4i32 _1) { return __lsx_vaddi_wu(_1, 1); }
610673c5308Schenli // CHECK-LABEL: @vaddi_du(
611673c5308Schenli // CHECK-NEXT:  entry:
612*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
613*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddi.du(<2 x i64> [[TMP0]], i32 1)
614*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
615*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
616673c5308Schenli //
vaddi_du(v2i64 _1)617673c5308Schenli v2i64 vaddi_du(v2i64 _1) { return __lsx_vaddi_du(_1, 1); }
618673c5308Schenli // CHECK-LABEL: @vsub_b(
619673c5308Schenli // CHECK-NEXT:  entry:
620*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
621*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
622*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsub.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
623*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
624*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
625673c5308Schenli //
vsub_b(v16i8 _1,v16i8 _2)626673c5308Schenli v16i8 vsub_b(v16i8 _1, v16i8 _2) { return __lsx_vsub_b(_1, _2); }
627673c5308Schenli // CHECK-LABEL: @vsub_h(
628673c5308Schenli // CHECK-NEXT:  entry:
629*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
630*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
631*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsub.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
632*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
633*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
634673c5308Schenli //
vsub_h(v8i16 _1,v8i16 _2)635673c5308Schenli v8i16 vsub_h(v8i16 _1, v8i16 _2) { return __lsx_vsub_h(_1, _2); }
636673c5308Schenli // CHECK-LABEL: @vsub_w(
637673c5308Schenli // CHECK-NEXT:  entry:
638*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
639*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
640*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsub.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
641*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
642*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
643673c5308Schenli //
vsub_w(v4i32 _1,v4i32 _2)644673c5308Schenli v4i32 vsub_w(v4i32 _1, v4i32 _2) { return __lsx_vsub_w(_1, _2); }
645673c5308Schenli // CHECK-LABEL: @vsub_d(
646673c5308Schenli // CHECK-NEXT:  entry:
647*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
648*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
649*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsub.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
650*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
651*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
652673c5308Schenli //
vsub_d(v2i64 _1,v2i64 _2)653673c5308Schenli v2i64 vsub_d(v2i64 _1, v2i64 _2) { return __lsx_vsub_d(_1, _2); }
654673c5308Schenli // CHECK-LABEL: @vsubi_bu(
655673c5308Schenli // CHECK-NEXT:  entry:
656*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
657*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsubi.bu(<16 x i8> [[TMP0]], i32 1)
658*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
659*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
660673c5308Schenli //
vsubi_bu(v16i8 _1)661673c5308Schenli v16i8 vsubi_bu(v16i8 _1) { return __lsx_vsubi_bu(_1, 1); }
662673c5308Schenli // CHECK-LABEL: @vsubi_hu(
663673c5308Schenli // CHECK-NEXT:  entry:
664*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
665*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsubi.hu(<8 x i16> [[TMP0]], i32 1)
666*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
667*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
668673c5308Schenli //
vsubi_hu(v8i16 _1)669673c5308Schenli v8i16 vsubi_hu(v8i16 _1) { return __lsx_vsubi_hu(_1, 1); }
670673c5308Schenli // CHECK-LABEL: @vsubi_wu(
671673c5308Schenli // CHECK-NEXT:  entry:
672*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
673*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsubi.wu(<4 x i32> [[TMP0]], i32 1)
674*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
675*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
676673c5308Schenli //
vsubi_wu(v4i32 _1)677673c5308Schenli v4i32 vsubi_wu(v4i32 _1) { return __lsx_vsubi_wu(_1, 1); }
678673c5308Schenli // CHECK-LABEL: @vsubi_du(
679673c5308Schenli // CHECK-NEXT:  entry:
680*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
681*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubi.du(<2 x i64> [[TMP0]], i32 1)
682*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
683*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
684673c5308Schenli //
vsubi_du(v2i64 _1)685673c5308Schenli v2i64 vsubi_du(v2i64 _1) { return __lsx_vsubi_du(_1, 1); }
686673c5308Schenli // CHECK-LABEL: @vmax_b(
687673c5308Schenli // CHECK-NEXT:  entry:
688*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
689*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
690*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmax.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
691*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
692*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
693673c5308Schenli //
vmax_b(v16i8 _1,v16i8 _2)694673c5308Schenli v16i8 vmax_b(v16i8 _1, v16i8 _2) { return __lsx_vmax_b(_1, _2); }
695673c5308Schenli // CHECK-LABEL: @vmax_h(
696673c5308Schenli // CHECK-NEXT:  entry:
697*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
698*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
699*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmax.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
700*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
701*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
702673c5308Schenli //
vmax_h(v8i16 _1,v8i16 _2)703673c5308Schenli v8i16 vmax_h(v8i16 _1, v8i16 _2) { return __lsx_vmax_h(_1, _2); }
704673c5308Schenli // CHECK-LABEL: @vmax_w(
705673c5308Schenli // CHECK-NEXT:  entry:
706*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
707*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
708*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmax.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
709*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
710*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
711673c5308Schenli //
vmax_w(v4i32 _1,v4i32 _2)712673c5308Schenli v4i32 vmax_w(v4i32 _1, v4i32 _2) { return __lsx_vmax_w(_1, _2); }
713673c5308Schenli // CHECK-LABEL: @vmax_d(
714673c5308Schenli // CHECK-NEXT:  entry:
715*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
716*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
717*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmax.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
718*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
719*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
720673c5308Schenli //
vmax_d(v2i64 _1,v2i64 _2)721673c5308Schenli v2i64 vmax_d(v2i64 _1, v2i64 _2) { return __lsx_vmax_d(_1, _2); }
722673c5308Schenli // CHECK-LABEL: @vmaxi_b(
723673c5308Schenli // CHECK-NEXT:  entry:
724*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
725*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmaxi.b(<16 x i8> [[TMP0]], i32 1)
726*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
727*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
728673c5308Schenli //
vmaxi_b(v16i8 _1)729673c5308Schenli v16i8 vmaxi_b(v16i8 _1) { return __lsx_vmaxi_b(_1, 1); }
730673c5308Schenli // CHECK-LABEL: @vmaxi_h(
731673c5308Schenli // CHECK-NEXT:  entry:
732*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
733*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaxi.h(<8 x i16> [[TMP0]], i32 1)
734*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
735*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
736673c5308Schenli //
vmaxi_h(v8i16 _1)737673c5308Schenli v8i16 vmaxi_h(v8i16 _1) { return __lsx_vmaxi_h(_1, 1); }
738673c5308Schenli // CHECK-LABEL: @vmaxi_w(
739673c5308Schenli // CHECK-NEXT:  entry:
740*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
741*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaxi.w(<4 x i32> [[TMP0]], i32 1)
742*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
743*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
744673c5308Schenli //
vmaxi_w(v4i32 _1)745673c5308Schenli v4i32 vmaxi_w(v4i32 _1) { return __lsx_vmaxi_w(_1, 1); }
746673c5308Schenli // CHECK-LABEL: @vmaxi_d(
747673c5308Schenli // CHECK-NEXT:  entry:
748*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
749*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaxi.d(<2 x i64> [[TMP0]], i32 1)
750*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
751*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
752673c5308Schenli //
vmaxi_d(v2i64 _1)753673c5308Schenli v2i64 vmaxi_d(v2i64 _1) { return __lsx_vmaxi_d(_1, 1); }
754673c5308Schenli // CHECK-LABEL: @vmax_bu(
755673c5308Schenli // CHECK-NEXT:  entry:
756*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
757*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
758*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmax.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
759*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
760*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
761673c5308Schenli //
vmax_bu(v16u8 _1,v16u8 _2)762673c5308Schenli v16u8 vmax_bu(v16u8 _1, v16u8 _2) { return __lsx_vmax_bu(_1, _2); }
763673c5308Schenli // CHECK-LABEL: @vmax_hu(
764673c5308Schenli // CHECK-NEXT:  entry:
765*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
766*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
767*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmax.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
768*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
769*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
770673c5308Schenli //
vmax_hu(v8u16 _1,v8u16 _2)771673c5308Schenli v8u16 vmax_hu(v8u16 _1, v8u16 _2) { return __lsx_vmax_hu(_1, _2); }
772673c5308Schenli // CHECK-LABEL: @vmax_wu(
773673c5308Schenli // CHECK-NEXT:  entry:
774*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
775*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
776*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmax.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
777*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
778*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
779673c5308Schenli //
vmax_wu(v4u32 _1,v4u32 _2)780673c5308Schenli v4u32 vmax_wu(v4u32 _1, v4u32 _2) { return __lsx_vmax_wu(_1, _2); }
781673c5308Schenli // CHECK-LABEL: @vmax_du(
782673c5308Schenli // CHECK-NEXT:  entry:
783*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
784*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
785*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmax.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
786*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
787*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
788673c5308Schenli //
vmax_du(v2u64 _1,v2u64 _2)789673c5308Schenli v2u64 vmax_du(v2u64 _1, v2u64 _2) { return __lsx_vmax_du(_1, _2); }
790673c5308Schenli // CHECK-LABEL: @vmaxi_bu(
791673c5308Schenli // CHECK-NEXT:  entry:
792*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
793*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmaxi.bu(<16 x i8> [[TMP0]], i32 1)
794*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
795*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
796673c5308Schenli //
vmaxi_bu(v16u8 _1)797673c5308Schenli v16u8 vmaxi_bu(v16u8 _1) { return __lsx_vmaxi_bu(_1, 1); }
798673c5308Schenli // CHECK-LABEL: @vmaxi_hu(
799673c5308Schenli // CHECK-NEXT:  entry:
800*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
801*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaxi.hu(<8 x i16> [[TMP0]], i32 1)
802*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
803*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
804673c5308Schenli //
vmaxi_hu(v8u16 _1)805673c5308Schenli v8u16 vmaxi_hu(v8u16 _1) { return __lsx_vmaxi_hu(_1, 1); }
806673c5308Schenli // CHECK-LABEL: @vmaxi_wu(
807673c5308Schenli // CHECK-NEXT:  entry:
808*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
809*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaxi.wu(<4 x i32> [[TMP0]], i32 1)
810*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
811*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
812673c5308Schenli //
vmaxi_wu(v4u32 _1)813673c5308Schenli v4u32 vmaxi_wu(v4u32 _1) { return __lsx_vmaxi_wu(_1, 1); }
814673c5308Schenli // CHECK-LABEL: @vmaxi_du(
815673c5308Schenli // CHECK-NEXT:  entry:
816*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
817*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaxi.du(<2 x i64> [[TMP0]], i32 1)
818*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
819*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
820673c5308Schenli //
vmaxi_du(v2u64 _1)821673c5308Schenli v2u64 vmaxi_du(v2u64 _1) { return __lsx_vmaxi_du(_1, 1); }
822673c5308Schenli // CHECK-LABEL: @vmin_b(
823673c5308Schenli // CHECK-NEXT:  entry:
824*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
825*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
826*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmin.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
827*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
828*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
829673c5308Schenli //
vmin_b(v16i8 _1,v16i8 _2)830673c5308Schenli v16i8 vmin_b(v16i8 _1, v16i8 _2) { return __lsx_vmin_b(_1, _2); }
831673c5308Schenli // CHECK-LABEL: @vmin_h(
832673c5308Schenli // CHECK-NEXT:  entry:
833*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
834*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
835*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmin.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
836*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
837*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
838673c5308Schenli //
vmin_h(v8i16 _1,v8i16 _2)839673c5308Schenli v8i16 vmin_h(v8i16 _1, v8i16 _2) { return __lsx_vmin_h(_1, _2); }
840673c5308Schenli // CHECK-LABEL: @vmin_w(
841673c5308Schenli // CHECK-NEXT:  entry:
842*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
843*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
844*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmin.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
845*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
846*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
847673c5308Schenli //
vmin_w(v4i32 _1,v4i32 _2)848673c5308Schenli v4i32 vmin_w(v4i32 _1, v4i32 _2) { return __lsx_vmin_w(_1, _2); }
849673c5308Schenli // CHECK-LABEL: @vmin_d(
850673c5308Schenli // CHECK-NEXT:  entry:
851*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
852*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
853*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmin.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
854*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
855*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
856673c5308Schenli //
vmin_d(v2i64 _1,v2i64 _2)857673c5308Schenli v2i64 vmin_d(v2i64 _1, v2i64 _2) { return __lsx_vmin_d(_1, _2); }
858673c5308Schenli // CHECK-LABEL: @vmini_b(
859673c5308Schenli // CHECK-NEXT:  entry:
860*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
861*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmini.b(<16 x i8> [[TMP0]], i32 1)
862*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
863*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
864673c5308Schenli //
vmini_b(v16i8 _1)865673c5308Schenli v16i8 vmini_b(v16i8 _1) { return __lsx_vmini_b(_1, 1); }
866673c5308Schenli // CHECK-LABEL: @vmini_h(
867673c5308Schenli // CHECK-NEXT:  entry:
868*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
869*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmini.h(<8 x i16> [[TMP0]], i32 1)
870*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
871*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
872673c5308Schenli //
vmini_h(v8i16 _1)873673c5308Schenli v8i16 vmini_h(v8i16 _1) { return __lsx_vmini_h(_1, 1); }
874673c5308Schenli // CHECK-LABEL: @vmini_w(
875673c5308Schenli // CHECK-NEXT:  entry:
876*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
877*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmini.w(<4 x i32> [[TMP0]], i32 1)
878*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
879*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
880673c5308Schenli //
vmini_w(v4i32 _1)881673c5308Schenli v4i32 vmini_w(v4i32 _1) { return __lsx_vmini_w(_1, 1); }
882673c5308Schenli // CHECK-LABEL: @vmini_d(
883673c5308Schenli // CHECK-NEXT:  entry:
884*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
885*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmini.d(<2 x i64> [[TMP0]], i32 1)
886*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
887*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
888673c5308Schenli //
vmini_d(v2i64 _1)889673c5308Schenli v2i64 vmini_d(v2i64 _1) { return __lsx_vmini_d(_1, 1); }
890673c5308Schenli // CHECK-LABEL: @vmin_bu(
891673c5308Schenli // CHECK-NEXT:  entry:
892*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
893*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
894*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmin.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
895*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
896*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
897673c5308Schenli //
vmin_bu(v16u8 _1,v16u8 _2)898673c5308Schenli v16u8 vmin_bu(v16u8 _1, v16u8 _2) { return __lsx_vmin_bu(_1, _2); }
899673c5308Schenli // CHECK-LABEL: @vmin_hu(
900673c5308Schenli // CHECK-NEXT:  entry:
901*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
902*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
903*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmin.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
904*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
905*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
906673c5308Schenli //
vmin_hu(v8u16 _1,v8u16 _2)907673c5308Schenli v8u16 vmin_hu(v8u16 _1, v8u16 _2) { return __lsx_vmin_hu(_1, _2); }
908673c5308Schenli // CHECK-LABEL: @vmin_wu(
909673c5308Schenli // CHECK-NEXT:  entry:
910*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
911*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
912*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmin.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
913*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
914*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
915673c5308Schenli //
vmin_wu(v4u32 _1,v4u32 _2)916673c5308Schenli v4u32 vmin_wu(v4u32 _1, v4u32 _2) { return __lsx_vmin_wu(_1, _2); }
917673c5308Schenli // CHECK-LABEL: @vmin_du(
918673c5308Schenli // CHECK-NEXT:  entry:
919*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
920*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
921*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmin.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
922*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
923*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
924673c5308Schenli //
vmin_du(v2u64 _1,v2u64 _2)925673c5308Schenli v2u64 vmin_du(v2u64 _1, v2u64 _2) { return __lsx_vmin_du(_1, _2); }
926673c5308Schenli // CHECK-LABEL: @vmini_bu(
927673c5308Schenli // CHECK-NEXT:  entry:
928*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
929*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmini.bu(<16 x i8> [[TMP0]], i32 1)
930*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
931*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
932673c5308Schenli //
vmini_bu(v16u8 _1)933673c5308Schenli v16u8 vmini_bu(v16u8 _1) { return __lsx_vmini_bu(_1, 1); }
934673c5308Schenli // CHECK-LABEL: @vmini_hu(
935673c5308Schenli // CHECK-NEXT:  entry:
936*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
937*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmini.hu(<8 x i16> [[TMP0]], i32 1)
938*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
939*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
940673c5308Schenli //
vmini_hu(v8u16 _1)941673c5308Schenli v8u16 vmini_hu(v8u16 _1) { return __lsx_vmini_hu(_1, 1); }
942673c5308Schenli // CHECK-LABEL: @vmini_wu(
943673c5308Schenli // CHECK-NEXT:  entry:
944*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
945*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmini.wu(<4 x i32> [[TMP0]], i32 1)
946*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
947*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
948673c5308Schenli //
vmini_wu(v4u32 _1)949673c5308Schenli v4u32 vmini_wu(v4u32 _1) { return __lsx_vmini_wu(_1, 1); }
950673c5308Schenli // CHECK-LABEL: @vmini_du(
951673c5308Schenli // CHECK-NEXT:  entry:
952*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
953*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmini.du(<2 x i64> [[TMP0]], i32 1)
954*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
955*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
956673c5308Schenli //
vmini_du(v2u64 _1)957673c5308Schenli v2u64 vmini_du(v2u64 _1) { return __lsx_vmini_du(_1, 1); }
958673c5308Schenli // CHECK-LABEL: @vseq_b(
959673c5308Schenli // CHECK-NEXT:  entry:
960*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
961*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
962*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vseq.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
963*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
964*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
965673c5308Schenli //
vseq_b(v16i8 _1,v16i8 _2)966673c5308Schenli v16i8 vseq_b(v16i8 _1, v16i8 _2) { return __lsx_vseq_b(_1, _2); }
967673c5308Schenli // CHECK-LABEL: @vseq_h(
968673c5308Schenli // CHECK-NEXT:  entry:
969*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
970*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
971*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vseq.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
972*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
973*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
974673c5308Schenli //
vseq_h(v8i16 _1,v8i16 _2)975673c5308Schenli v8i16 vseq_h(v8i16 _1, v8i16 _2) { return __lsx_vseq_h(_1, _2); }
976673c5308Schenli // CHECK-LABEL: @vseq_w(
977673c5308Schenli // CHECK-NEXT:  entry:
978*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
979*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
980*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vseq.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
981*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
982*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
983673c5308Schenli //
vseq_w(v4i32 _1,v4i32 _2)984673c5308Schenli v4i32 vseq_w(v4i32 _1, v4i32 _2) { return __lsx_vseq_w(_1, _2); }
985673c5308Schenli // CHECK-LABEL: @vseq_d(
986673c5308Schenli // CHECK-NEXT:  entry:
987*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
988*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
989*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vseq.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
990*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
991*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
992673c5308Schenli //
vseq_d(v2i64 _1,v2i64 _2)993673c5308Schenli v2i64 vseq_d(v2i64 _1, v2i64 _2) { return __lsx_vseq_d(_1, _2); }
994673c5308Schenli // CHECK-LABEL: @vseqi_b(
995673c5308Schenli // CHECK-NEXT:  entry:
996*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
997*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vseqi.b(<16 x i8> [[TMP0]], i32 1)
998*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
999*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1000673c5308Schenli //
vseqi_b(v16i8 _1)1001673c5308Schenli v16i8 vseqi_b(v16i8 _1) { return __lsx_vseqi_b(_1, 1); }
1002673c5308Schenli // CHECK-LABEL: @vseqi_h(
1003673c5308Schenli // CHECK-NEXT:  entry:
1004*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1005*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vseqi.h(<8 x i16> [[TMP0]], i32 1)
1006*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
1007*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1008673c5308Schenli //
vseqi_h(v8i16 _1)1009673c5308Schenli v8i16 vseqi_h(v8i16 _1) { return __lsx_vseqi_h(_1, 1); }
1010673c5308Schenli // CHECK-LABEL: @vseqi_w(
1011673c5308Schenli // CHECK-NEXT:  entry:
1012*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1013*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vseqi.w(<4 x i32> [[TMP0]], i32 1)
1014*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
1015*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1016673c5308Schenli //
vseqi_w(v4i32 _1)1017673c5308Schenli v4i32 vseqi_w(v4i32 _1) { return __lsx_vseqi_w(_1, 1); }
1018673c5308Schenli // CHECK-LABEL: @vseqi_d(
1019673c5308Schenli // CHECK-NEXT:  entry:
1020*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1021*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vseqi.d(<2 x i64> [[TMP0]], i32 1)
1022*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
1023*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1024673c5308Schenli //
vseqi_d(v2i64 _1)1025673c5308Schenli v2i64 vseqi_d(v2i64 _1) { return __lsx_vseqi_d(_1, 1); }
1026673c5308Schenli // CHECK-LABEL: @vslti_b(
1027673c5308Schenli // CHECK-NEXT:  entry:
1028*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1029*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslti.b(<16 x i8> [[TMP0]], i32 1)
1030*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
1031*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1032673c5308Schenli //
vslti_b(v16i8 _1)1033673c5308Schenli v16i8 vslti_b(v16i8 _1) { return __lsx_vslti_b(_1, 1); }
1034673c5308Schenli // CHECK-LABEL: @vslt_b(
1035673c5308Schenli // CHECK-NEXT:  entry:
1036*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1037*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1038*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslt.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1039*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1040*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1041673c5308Schenli //
vslt_b(v16i8 _1,v16i8 _2)1042673c5308Schenli v16i8 vslt_b(v16i8 _1, v16i8 _2) { return __lsx_vslt_b(_1, _2); }
1043673c5308Schenli // CHECK-LABEL: @vslt_h(
1044673c5308Schenli // CHECK-NEXT:  entry:
1045*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1046*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1047*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslt.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1048*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1049*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1050673c5308Schenli //
vslt_h(v8i16 _1,v8i16 _2)1051673c5308Schenli v8i16 vslt_h(v8i16 _1, v8i16 _2) { return __lsx_vslt_h(_1, _2); }
1052673c5308Schenli // CHECK-LABEL: @vslt_w(
1053673c5308Schenli // CHECK-NEXT:  entry:
1054*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1055*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1056*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslt.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1057*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1058*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1059673c5308Schenli //
vslt_w(v4i32 _1,v4i32 _2)1060673c5308Schenli v4i32 vslt_w(v4i32 _1, v4i32 _2) { return __lsx_vslt_w(_1, _2); }
1061673c5308Schenli // CHECK-LABEL: @vslt_d(
1062673c5308Schenli // CHECK-NEXT:  entry:
1063*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1064*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1065*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslt.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1066*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1067*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1068673c5308Schenli //
vslt_d(v2i64 _1,v2i64 _2)1069673c5308Schenli v2i64 vslt_d(v2i64 _1, v2i64 _2) { return __lsx_vslt_d(_1, _2); }
1070673c5308Schenli // CHECK-LABEL: @vslti_h(
1071673c5308Schenli // CHECK-NEXT:  entry:
1072*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1073*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslti.h(<8 x i16> [[TMP0]], i32 1)
1074*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
1075*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1076673c5308Schenli //
vslti_h(v8i16 _1)1077673c5308Schenli v8i16 vslti_h(v8i16 _1) { return __lsx_vslti_h(_1, 1); }
1078673c5308Schenli // CHECK-LABEL: @vslti_w(
1079673c5308Schenli // CHECK-NEXT:  entry:
1080*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1081*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslti.w(<4 x i32> [[TMP0]], i32 1)
1082*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
1083*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1084673c5308Schenli //
vslti_w(v4i32 _1)1085673c5308Schenli v4i32 vslti_w(v4i32 _1) { return __lsx_vslti_w(_1, 1); }
1086673c5308Schenli // CHECK-LABEL: @vslti_d(
1087673c5308Schenli // CHECK-NEXT:  entry:
1088*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1089*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslti.d(<2 x i64> [[TMP0]], i32 1)
1090*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
1091*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1092673c5308Schenli //
vslti_d(v2i64 _1)1093673c5308Schenli v2i64 vslti_d(v2i64 _1) { return __lsx_vslti_d(_1, 1); }
1094673c5308Schenli // CHECK-LABEL: @vslt_bu(
1095673c5308Schenli // CHECK-NEXT:  entry:
1096*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1097*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1098*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslt.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1099*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1100*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1101673c5308Schenli //
vslt_bu(v16u8 _1,v16u8 _2)1102673c5308Schenli v16i8 vslt_bu(v16u8 _1, v16u8 _2) { return __lsx_vslt_bu(_1, _2); }
1103673c5308Schenli // CHECK-LABEL: @vslt_hu(
1104673c5308Schenli // CHECK-NEXT:  entry:
1105*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1106*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1107*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslt.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1108*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1109*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1110673c5308Schenli //
vslt_hu(v8u16 _1,v8u16 _2)1111673c5308Schenli v8i16 vslt_hu(v8u16 _1, v8u16 _2) { return __lsx_vslt_hu(_1, _2); }
1112673c5308Schenli // CHECK-LABEL: @vslt_wu(
1113673c5308Schenli // CHECK-NEXT:  entry:
1114*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1115*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1116*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslt.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1117*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1118*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1119673c5308Schenli //
vslt_wu(v4u32 _1,v4u32 _2)1120673c5308Schenli v4i32 vslt_wu(v4u32 _1, v4u32 _2) { return __lsx_vslt_wu(_1, _2); }
1121673c5308Schenli // CHECK-LABEL: @vslt_du(
1122673c5308Schenli // CHECK-NEXT:  entry:
1123*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1124*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1125*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslt.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1126*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1127*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1128673c5308Schenli //
vslt_du(v2u64 _1,v2u64 _2)1129673c5308Schenli v2i64 vslt_du(v2u64 _1, v2u64 _2) { return __lsx_vslt_du(_1, _2); }
1130673c5308Schenli // CHECK-LABEL: @vslti_bu(
1131673c5308Schenli // CHECK-NEXT:  entry:
1132*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1133*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslti.bu(<16 x i8> [[TMP0]], i32 1)
1134*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
1135*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1136673c5308Schenli //
vslti_bu(v16u8 _1)1137673c5308Schenli v16i8 vslti_bu(v16u8 _1) { return __lsx_vslti_bu(_1, 1); }
1138673c5308Schenli // CHECK-LABEL: @vslti_hu(
1139673c5308Schenli // CHECK-NEXT:  entry:
1140*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1141*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslti.hu(<8 x i16> [[TMP0]], i32 1)
1142*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
1143*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1144673c5308Schenli //
vslti_hu(v8u16 _1)1145673c5308Schenli v8i16 vslti_hu(v8u16 _1) { return __lsx_vslti_hu(_1, 1); }
1146673c5308Schenli // CHECK-LABEL: @vslti_wu(
1147673c5308Schenli // CHECK-NEXT:  entry:
1148*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1149*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslti.wu(<4 x i32> [[TMP0]], i32 1)
1150*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
1151*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1152673c5308Schenli //
vslti_wu(v4u32 _1)1153673c5308Schenli v4i32 vslti_wu(v4u32 _1) { return __lsx_vslti_wu(_1, 1); }
1154673c5308Schenli // CHECK-LABEL: @vslti_du(
1155673c5308Schenli // CHECK-NEXT:  entry:
1156*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1157*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslti.du(<2 x i64> [[TMP0]], i32 1)
1158*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
1159*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1160673c5308Schenli //
vslti_du(v2u64 _1)1161673c5308Schenli v2i64 vslti_du(v2u64 _1) { return __lsx_vslti_du(_1, 1); }
1162673c5308Schenli // CHECK-LABEL: @vsle_b(
1163673c5308Schenli // CHECK-NEXT:  entry:
1164*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1165*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1166*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsle.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1167*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1168*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1169673c5308Schenli //
vsle_b(v16i8 _1,v16i8 _2)1170673c5308Schenli v16i8 vsle_b(v16i8 _1, v16i8 _2) { return __lsx_vsle_b(_1, _2); }
1171673c5308Schenli // CHECK-LABEL: @vsle_h(
1172673c5308Schenli // CHECK-NEXT:  entry:
1173*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1174*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1175*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsle.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1176*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1177*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1178673c5308Schenli //
vsle_h(v8i16 _1,v8i16 _2)1179673c5308Schenli v8i16 vsle_h(v8i16 _1, v8i16 _2) { return __lsx_vsle_h(_1, _2); }
1180673c5308Schenli // CHECK-LABEL: @vsle_w(
1181673c5308Schenli // CHECK-NEXT:  entry:
1182*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1183*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1184*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsle.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1185*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1186*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1187673c5308Schenli //
vsle_w(v4i32 _1,v4i32 _2)1188673c5308Schenli v4i32 vsle_w(v4i32 _1, v4i32 _2) { return __lsx_vsle_w(_1, _2); }
1189673c5308Schenli // CHECK-LABEL: @vsle_d(
1190673c5308Schenli // CHECK-NEXT:  entry:
1191*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1192*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1193*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsle.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1194*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1195*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1196673c5308Schenli //
vsle_d(v2i64 _1,v2i64 _2)1197673c5308Schenli v2i64 vsle_d(v2i64 _1, v2i64 _2) { return __lsx_vsle_d(_1, _2); }
1198673c5308Schenli // CHECK-LABEL: @vslei_b(
1199673c5308Schenli // CHECK-NEXT:  entry:
1200*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1201*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslei.b(<16 x i8> [[TMP0]], i32 1)
1202*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
1203*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1204673c5308Schenli //
vslei_b(v16i8 _1)1205673c5308Schenli v16i8 vslei_b(v16i8 _1) { return __lsx_vslei_b(_1, 1); }
1206673c5308Schenli // CHECK-LABEL: @vslei_h(
1207673c5308Schenli // CHECK-NEXT:  entry:
1208*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1209*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslei.h(<8 x i16> [[TMP0]], i32 1)
1210*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
1211*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1212673c5308Schenli //
vslei_h(v8i16 _1)1213673c5308Schenli v8i16 vslei_h(v8i16 _1) { return __lsx_vslei_h(_1, 1); }
1214673c5308Schenli // CHECK-LABEL: @vslei_w(
1215673c5308Schenli // CHECK-NEXT:  entry:
1216*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1217*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslei.w(<4 x i32> [[TMP0]], i32 1)
1218*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
1219*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1220673c5308Schenli //
vslei_w(v4i32 _1)1221673c5308Schenli v4i32 vslei_w(v4i32 _1) { return __lsx_vslei_w(_1, 1); }
1222673c5308Schenli // CHECK-LABEL: @vslei_d(
1223673c5308Schenli // CHECK-NEXT:  entry:
1224*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1225*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslei.d(<2 x i64> [[TMP0]], i32 1)
1226*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
1227*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1228673c5308Schenli //
vslei_d(v2i64 _1)1229673c5308Schenli v2i64 vslei_d(v2i64 _1) { return __lsx_vslei_d(_1, 1); }
1230673c5308Schenli // CHECK-LABEL: @vsle_bu(
1231673c5308Schenli // CHECK-NEXT:  entry:
1232*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1233*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1234*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsle.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1235*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1236*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1237673c5308Schenli //
vsle_bu(v16u8 _1,v16u8 _2)1238673c5308Schenli v16i8 vsle_bu(v16u8 _1, v16u8 _2) { return __lsx_vsle_bu(_1, _2); }
1239673c5308Schenli // CHECK-LABEL: @vsle_hu(
1240673c5308Schenli // CHECK-NEXT:  entry:
1241*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1242*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1243*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsle.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1244*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1245*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1246673c5308Schenli //
vsle_hu(v8u16 _1,v8u16 _2)1247673c5308Schenli v8i16 vsle_hu(v8u16 _1, v8u16 _2) { return __lsx_vsle_hu(_1, _2); }
1248673c5308Schenli // CHECK-LABEL: @vsle_wu(
1249673c5308Schenli // CHECK-NEXT:  entry:
1250*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1251*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1252*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsle.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1253*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1254*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1255673c5308Schenli //
vsle_wu(v4u32 _1,v4u32 _2)1256673c5308Schenli v4i32 vsle_wu(v4u32 _1, v4u32 _2) { return __lsx_vsle_wu(_1, _2); }
1257673c5308Schenli // CHECK-LABEL: @vsle_du(
1258673c5308Schenli // CHECK-NEXT:  entry:
1259*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1260*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1261*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsle.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1262*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1263*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1264673c5308Schenli //
vsle_du(v2u64 _1,v2u64 _2)1265673c5308Schenli v2i64 vsle_du(v2u64 _1, v2u64 _2) { return __lsx_vsle_du(_1, _2); }
1266673c5308Schenli // CHECK-LABEL: @vslei_bu(
1267673c5308Schenli // CHECK-NEXT:  entry:
1268*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1269*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslei.bu(<16 x i8> [[TMP0]], i32 1)
1270*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
1271*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1272673c5308Schenli //
vslei_bu(v16u8 _1)1273673c5308Schenli v16i8 vslei_bu(v16u8 _1) { return __lsx_vslei_bu(_1, 1); }
1274673c5308Schenli // CHECK-LABEL: @vslei_hu(
1275673c5308Schenli // CHECK-NEXT:  entry:
1276*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1277*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslei.hu(<8 x i16> [[TMP0]], i32 1)
1278*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
1279*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1280673c5308Schenli //
vslei_hu(v8u16 _1)1281673c5308Schenli v8i16 vslei_hu(v8u16 _1) { return __lsx_vslei_hu(_1, 1); }
1282673c5308Schenli // CHECK-LABEL: @vslei_wu(
1283673c5308Schenli // CHECK-NEXT:  entry:
1284*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1285*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslei.wu(<4 x i32> [[TMP0]], i32 1)
1286*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
1287*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1288673c5308Schenli //
vslei_wu(v4u32 _1)1289673c5308Schenli v4i32 vslei_wu(v4u32 _1) { return __lsx_vslei_wu(_1, 1); }
1290673c5308Schenli // CHECK-LABEL: @vslei_du(
1291673c5308Schenli // CHECK-NEXT:  entry:
1292*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1293*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslei.du(<2 x i64> [[TMP0]], i32 1)
1294*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
1295*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1296673c5308Schenli //
vslei_du(v2u64 _1)1297673c5308Schenli v2i64 vslei_du(v2u64 _1) { return __lsx_vslei_du(_1, 1); }
1298673c5308Schenli // CHECK-LABEL: @vsat_b(
1299673c5308Schenli // CHECK-NEXT:  entry:
1300*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1301*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsat.b(<16 x i8> [[TMP0]], i32 1)
1302*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
1303*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1304673c5308Schenli //
vsat_b(v16i8 _1)1305673c5308Schenli v16i8 vsat_b(v16i8 _1) { return __lsx_vsat_b(_1, 1); }
1306673c5308Schenli // CHECK-LABEL: @vsat_h(
1307673c5308Schenli // CHECK-NEXT:  entry:
1308*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1309*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsat.h(<8 x i16> [[TMP0]], i32 1)
1310*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
1311*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1312673c5308Schenli //
vsat_h(v8i16 _1)1313673c5308Schenli v8i16 vsat_h(v8i16 _1) { return __lsx_vsat_h(_1, 1); }
1314673c5308Schenli // CHECK-LABEL: @vsat_w(
1315673c5308Schenli // CHECK-NEXT:  entry:
1316*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1317*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsat.w(<4 x i32> [[TMP0]], i32 1)
1318*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
1319*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1320673c5308Schenli //
vsat_w(v4i32 _1)1321673c5308Schenli v4i32 vsat_w(v4i32 _1) { return __lsx_vsat_w(_1, 1); }
1322673c5308Schenli // CHECK-LABEL: @vsat_d(
1323673c5308Schenli // CHECK-NEXT:  entry:
1324*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1325*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsat.d(<2 x i64> [[TMP0]], i32 1)
1326*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
1327*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1328673c5308Schenli //
vsat_d(v2i64 _1)1329673c5308Schenli v2i64 vsat_d(v2i64 _1) { return __lsx_vsat_d(_1, 1); }
1330673c5308Schenli // CHECK-LABEL: @vsat_bu(
1331673c5308Schenli // CHECK-NEXT:  entry:
1332*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1333*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsat.bu(<16 x i8> [[TMP0]], i32 1)
1334*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
1335*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1336673c5308Schenli //
vsat_bu(v16u8 _1)1337673c5308Schenli v16u8 vsat_bu(v16u8 _1) { return __lsx_vsat_bu(_1, 1); }
1338673c5308Schenli // CHECK-LABEL: @vsat_hu(
1339673c5308Schenli // CHECK-NEXT:  entry:
1340*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1341*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsat.hu(<8 x i16> [[TMP0]], i32 1)
1342*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
1343*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1344673c5308Schenli //
vsat_hu(v8u16 _1)1345673c5308Schenli v8u16 vsat_hu(v8u16 _1) { return __lsx_vsat_hu(_1, 1); }
1346673c5308Schenli // CHECK-LABEL: @vsat_wu(
1347673c5308Schenli // CHECK-NEXT:  entry:
1348*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1349*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsat.wu(<4 x i32> [[TMP0]], i32 1)
1350*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
1351*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1352673c5308Schenli //
vsat_wu(v4u32 _1)1353673c5308Schenli v4u32 vsat_wu(v4u32 _1) { return __lsx_vsat_wu(_1, 1); }
1354673c5308Schenli // CHECK-LABEL: @vsat_du(
1355673c5308Schenli // CHECK-NEXT:  entry:
1356*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1357*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsat.du(<2 x i64> [[TMP0]], i32 1)
1358*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
1359*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
1360673c5308Schenli //
vsat_du(v2u64 _1)1361673c5308Schenli v2u64 vsat_du(v2u64 _1) { return __lsx_vsat_du(_1, 1); }
1362673c5308Schenli // CHECK-LABEL: @vadda_b(
1363673c5308Schenli // CHECK-NEXT:  entry:
1364*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1365*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1366*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vadda.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1367*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1368*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1369673c5308Schenli //
vadda_b(v16i8 _1,v16i8 _2)1370673c5308Schenli v16i8 vadda_b(v16i8 _1, v16i8 _2) { return __lsx_vadda_b(_1, _2); }
1371673c5308Schenli // CHECK-LABEL: @vadda_h(
1372673c5308Schenli // CHECK-NEXT:  entry:
1373*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1374*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1375*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vadda.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1376*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1377*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1378673c5308Schenli //
vadda_h(v8i16 _1,v8i16 _2)1379673c5308Schenli v8i16 vadda_h(v8i16 _1, v8i16 _2) { return __lsx_vadda_h(_1, _2); }
1380673c5308Schenli // CHECK-LABEL: @vadda_w(
1381673c5308Schenli // CHECK-NEXT:  entry:
1382*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1383*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1384*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vadda.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1385*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1386*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1387673c5308Schenli //
vadda_w(v4i32 _1,v4i32 _2)1388673c5308Schenli v4i32 vadda_w(v4i32 _1, v4i32 _2) { return __lsx_vadda_w(_1, _2); }
1389673c5308Schenli // CHECK-LABEL: @vadda_d(
1390673c5308Schenli // CHECK-NEXT:  entry:
1391*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1392*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1393*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vadda.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1394*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1395*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1396673c5308Schenli //
vadda_d(v2i64 _1,v2i64 _2)1397673c5308Schenli v2i64 vadda_d(v2i64 _1, v2i64 _2) { return __lsx_vadda_d(_1, _2); }
1398673c5308Schenli // CHECK-LABEL: @vsadd_b(
1399673c5308Schenli // CHECK-NEXT:  entry:
1400*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1401*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1402*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsadd.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1403*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1404*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1405673c5308Schenli //
vsadd_b(v16i8 _1,v16i8 _2)1406673c5308Schenli v16i8 vsadd_b(v16i8 _1, v16i8 _2) { return __lsx_vsadd_b(_1, _2); }
1407673c5308Schenli // CHECK-LABEL: @vsadd_h(
1408673c5308Schenli // CHECK-NEXT:  entry:
1409*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1410*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1411*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsadd.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1412*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1413*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1414673c5308Schenli //
vsadd_h(v8i16 _1,v8i16 _2)1415673c5308Schenli v8i16 vsadd_h(v8i16 _1, v8i16 _2) { return __lsx_vsadd_h(_1, _2); }
1416673c5308Schenli // CHECK-LABEL: @vsadd_w(
1417673c5308Schenli // CHECK-NEXT:  entry:
1418*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1419*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1420*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsadd.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1421*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1422*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1423673c5308Schenli //
vsadd_w(v4i32 _1,v4i32 _2)1424673c5308Schenli v4i32 vsadd_w(v4i32 _1, v4i32 _2) { return __lsx_vsadd_w(_1, _2); }
1425673c5308Schenli // CHECK-LABEL: @vsadd_d(
1426673c5308Schenli // CHECK-NEXT:  entry:
1427*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1428*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1429*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsadd.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1430*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1431*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1432673c5308Schenli //
vsadd_d(v2i64 _1,v2i64 _2)1433673c5308Schenli v2i64 vsadd_d(v2i64 _1, v2i64 _2) { return __lsx_vsadd_d(_1, _2); }
1434673c5308Schenli // CHECK-LABEL: @vsadd_bu(
1435673c5308Schenli // CHECK-NEXT:  entry:
1436*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1437*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1438*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsadd.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1439*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1440*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1441673c5308Schenli //
vsadd_bu(v16u8 _1,v16u8 _2)1442673c5308Schenli v16u8 vsadd_bu(v16u8 _1, v16u8 _2) { return __lsx_vsadd_bu(_1, _2); }
1443673c5308Schenli // CHECK-LABEL: @vsadd_hu(
1444673c5308Schenli // CHECK-NEXT:  entry:
1445*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1446*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1447*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsadd.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1448*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1449*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1450673c5308Schenli //
vsadd_hu(v8u16 _1,v8u16 _2)1451673c5308Schenli v8u16 vsadd_hu(v8u16 _1, v8u16 _2) { return __lsx_vsadd_hu(_1, _2); }
1452673c5308Schenli // CHECK-LABEL: @vsadd_wu(
1453673c5308Schenli // CHECK-NEXT:  entry:
1454*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1455*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1456*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsadd.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1457*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1458*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1459673c5308Schenli //
vsadd_wu(v4u32 _1,v4u32 _2)1460673c5308Schenli v4u32 vsadd_wu(v4u32 _1, v4u32 _2) { return __lsx_vsadd_wu(_1, _2); }
1461673c5308Schenli // CHECK-LABEL: @vsadd_du(
1462673c5308Schenli // CHECK-NEXT:  entry:
1463*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1464*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1465*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsadd.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1466*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1467*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1468673c5308Schenli //
vsadd_du(v2u64 _1,v2u64 _2)1469673c5308Schenli v2u64 vsadd_du(v2u64 _1, v2u64 _2) { return __lsx_vsadd_du(_1, _2); }
1470673c5308Schenli // CHECK-LABEL: @vavg_b(
1471673c5308Schenli // CHECK-NEXT:  entry:
1472*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1473*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1474*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vavg.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1475*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1476*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1477673c5308Schenli //
vavg_b(v16i8 _1,v16i8 _2)1478673c5308Schenli v16i8 vavg_b(v16i8 _1, v16i8 _2) { return __lsx_vavg_b(_1, _2); }
1479673c5308Schenli // CHECK-LABEL: @vavg_h(
1480673c5308Schenli // CHECK-NEXT:  entry:
1481*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1482*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1483*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vavg.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1484*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1485*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1486673c5308Schenli //
vavg_h(v8i16 _1,v8i16 _2)1487673c5308Schenli v8i16 vavg_h(v8i16 _1, v8i16 _2) { return __lsx_vavg_h(_1, _2); }
1488673c5308Schenli // CHECK-LABEL: @vavg_w(
1489673c5308Schenli // CHECK-NEXT:  entry:
1490*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1491*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1492*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vavg.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1493*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1494*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1495673c5308Schenli //
vavg_w(v4i32 _1,v4i32 _2)1496673c5308Schenli v4i32 vavg_w(v4i32 _1, v4i32 _2) { return __lsx_vavg_w(_1, _2); }
1497673c5308Schenli // CHECK-LABEL: @vavg_d(
1498673c5308Schenli // CHECK-NEXT:  entry:
1499*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1500*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1501*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vavg.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1502*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1503*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1504673c5308Schenli //
vavg_d(v2i64 _1,v2i64 _2)1505673c5308Schenli v2i64 vavg_d(v2i64 _1, v2i64 _2) { return __lsx_vavg_d(_1, _2); }
1506673c5308Schenli // CHECK-LABEL: @vavg_bu(
1507673c5308Schenli // CHECK-NEXT:  entry:
1508*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1509*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1510*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vavg.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1511*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1512*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1513673c5308Schenli //
vavg_bu(v16u8 _1,v16u8 _2)1514673c5308Schenli v16u8 vavg_bu(v16u8 _1, v16u8 _2) { return __lsx_vavg_bu(_1, _2); }
1515673c5308Schenli // CHECK-LABEL: @vavg_hu(
1516673c5308Schenli // CHECK-NEXT:  entry:
1517*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1518*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1519*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vavg.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1520*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1521*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1522673c5308Schenli //
vavg_hu(v8u16 _1,v8u16 _2)1523673c5308Schenli v8u16 vavg_hu(v8u16 _1, v8u16 _2) { return __lsx_vavg_hu(_1, _2); }
1524673c5308Schenli // CHECK-LABEL: @vavg_wu(
1525673c5308Schenli // CHECK-NEXT:  entry:
1526*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1527*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1528*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vavg.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1529*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1530*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1531673c5308Schenli //
vavg_wu(v4u32 _1,v4u32 _2)1532673c5308Schenli v4u32 vavg_wu(v4u32 _1, v4u32 _2) { return __lsx_vavg_wu(_1, _2); }
1533673c5308Schenli // CHECK-LABEL: @vavg_du(
1534673c5308Schenli // CHECK-NEXT:  entry:
1535*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1536*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1537*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vavg.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1538*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1539*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1540673c5308Schenli //
vavg_du(v2u64 _1,v2u64 _2)1541673c5308Schenli v2u64 vavg_du(v2u64 _1, v2u64 _2) { return __lsx_vavg_du(_1, _2); }
1542673c5308Schenli // CHECK-LABEL: @vavgr_b(
1543673c5308Schenli // CHECK-NEXT:  entry:
1544*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1545*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1546*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vavgr.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1547*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1548*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1549673c5308Schenli //
vavgr_b(v16i8 _1,v16i8 _2)1550673c5308Schenli v16i8 vavgr_b(v16i8 _1, v16i8 _2) { return __lsx_vavgr_b(_1, _2); }
1551673c5308Schenli // CHECK-LABEL: @vavgr_h(
1552673c5308Schenli // CHECK-NEXT:  entry:
1553*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1554*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1555*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vavgr.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1556*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1557*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1558673c5308Schenli //
vavgr_h(v8i16 _1,v8i16 _2)1559673c5308Schenli v8i16 vavgr_h(v8i16 _1, v8i16 _2) { return __lsx_vavgr_h(_1, _2); }
1560673c5308Schenli // CHECK-LABEL: @vavgr_w(
1561673c5308Schenli // CHECK-NEXT:  entry:
1562*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1563*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1564*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vavgr.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1565*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1566*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1567673c5308Schenli //
vavgr_w(v4i32 _1,v4i32 _2)1568673c5308Schenli v4i32 vavgr_w(v4i32 _1, v4i32 _2) { return __lsx_vavgr_w(_1, _2); }
1569673c5308Schenli // CHECK-LABEL: @vavgr_d(
1570673c5308Schenli // CHECK-NEXT:  entry:
1571*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1572*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1573*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vavgr.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1574*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1575*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1576673c5308Schenli //
vavgr_d(v2i64 _1,v2i64 _2)1577673c5308Schenli v2i64 vavgr_d(v2i64 _1, v2i64 _2) { return __lsx_vavgr_d(_1, _2); }
1578673c5308Schenli // CHECK-LABEL: @vavgr_bu(
1579673c5308Schenli // CHECK-NEXT:  entry:
1580*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1581*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1582*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vavgr.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1583*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1584*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1585673c5308Schenli //
vavgr_bu(v16u8 _1,v16u8 _2)1586673c5308Schenli v16u8 vavgr_bu(v16u8 _1, v16u8 _2) { return __lsx_vavgr_bu(_1, _2); }
1587673c5308Schenli // CHECK-LABEL: @vavgr_hu(
1588673c5308Schenli // CHECK-NEXT:  entry:
1589*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1590*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1591*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vavgr.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1592*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1593*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1594673c5308Schenli //
vavgr_hu(v8u16 _1,v8u16 _2)1595673c5308Schenli v8u16 vavgr_hu(v8u16 _1, v8u16 _2) { return __lsx_vavgr_hu(_1, _2); }
1596673c5308Schenli // CHECK-LABEL: @vavgr_wu(
1597673c5308Schenli // CHECK-NEXT:  entry:
1598*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1599*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1600*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vavgr.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1601*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1602*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1603673c5308Schenli //
vavgr_wu(v4u32 _1,v4u32 _2)1604673c5308Schenli v4u32 vavgr_wu(v4u32 _1, v4u32 _2) { return __lsx_vavgr_wu(_1, _2); }
1605673c5308Schenli // CHECK-LABEL: @vavgr_du(
1606673c5308Schenli // CHECK-NEXT:  entry:
1607*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1608*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1609*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vavgr.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1610*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1611*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1612673c5308Schenli //
vavgr_du(v2u64 _1,v2u64 _2)1613673c5308Schenli v2u64 vavgr_du(v2u64 _1, v2u64 _2) { return __lsx_vavgr_du(_1, _2); }
1614673c5308Schenli // CHECK-LABEL: @vssub_b(
1615673c5308Schenli // CHECK-NEXT:  entry:
1616*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1617*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1618*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssub.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1619*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1620*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1621673c5308Schenli //
vssub_b(v16i8 _1,v16i8 _2)1622673c5308Schenli v16i8 vssub_b(v16i8 _1, v16i8 _2) { return __lsx_vssub_b(_1, _2); }
1623673c5308Schenli // CHECK-LABEL: @vssub_h(
1624673c5308Schenli // CHECK-NEXT:  entry:
1625*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1626*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1627*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssub.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1628*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1629*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1630673c5308Schenli //
vssub_h(v8i16 _1,v8i16 _2)1631673c5308Schenli v8i16 vssub_h(v8i16 _1, v8i16 _2) { return __lsx_vssub_h(_1, _2); }
1632673c5308Schenli // CHECK-LABEL: @vssub_w(
1633673c5308Schenli // CHECK-NEXT:  entry:
1634*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1635*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1636*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssub.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1637*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1638*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1639673c5308Schenli //
vssub_w(v4i32 _1,v4i32 _2)1640673c5308Schenli v4i32 vssub_w(v4i32 _1, v4i32 _2) { return __lsx_vssub_w(_1, _2); }
1641673c5308Schenli // CHECK-LABEL: @vssub_d(
1642673c5308Schenli // CHECK-NEXT:  entry:
1643*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1644*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1645*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssub.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1646*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1647*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1648673c5308Schenli //
vssub_d(v2i64 _1,v2i64 _2)1649673c5308Schenli v2i64 vssub_d(v2i64 _1, v2i64 _2) { return __lsx_vssub_d(_1, _2); }
1650673c5308Schenli // CHECK-LABEL: @vssub_bu(
1651673c5308Schenli // CHECK-NEXT:  entry:
1652*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1653*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1654*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssub.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1655*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1656*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1657673c5308Schenli //
vssub_bu(v16u8 _1,v16u8 _2)1658673c5308Schenli v16u8 vssub_bu(v16u8 _1, v16u8 _2) { return __lsx_vssub_bu(_1, _2); }
1659673c5308Schenli // CHECK-LABEL: @vssub_hu(
1660673c5308Schenli // CHECK-NEXT:  entry:
1661*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1662*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1663*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssub.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1664*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1665*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1666673c5308Schenli //
vssub_hu(v8u16 _1,v8u16 _2)1667673c5308Schenli v8u16 vssub_hu(v8u16 _1, v8u16 _2) { return __lsx_vssub_hu(_1, _2); }
1668673c5308Schenli // CHECK-LABEL: @vssub_wu(
1669673c5308Schenli // CHECK-NEXT:  entry:
1670*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1671*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1672*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssub.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1673*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1674*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1675673c5308Schenli //
vssub_wu(v4u32 _1,v4u32 _2)1676673c5308Schenli v4u32 vssub_wu(v4u32 _1, v4u32 _2) { return __lsx_vssub_wu(_1, _2); }
1677673c5308Schenli // CHECK-LABEL: @vssub_du(
1678673c5308Schenli // CHECK-NEXT:  entry:
1679*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1680*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1681*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssub.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1682*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1683*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1684673c5308Schenli //
vssub_du(v2u64 _1,v2u64 _2)1685673c5308Schenli v2u64 vssub_du(v2u64 _1, v2u64 _2) { return __lsx_vssub_du(_1, _2); }
1686673c5308Schenli // CHECK-LABEL: @vabsd_b(
1687673c5308Schenli // CHECK-NEXT:  entry:
1688*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1689*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1690*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vabsd.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1691*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1692*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1693673c5308Schenli //
vabsd_b(v16i8 _1,v16i8 _2)1694673c5308Schenli v16i8 vabsd_b(v16i8 _1, v16i8 _2) { return __lsx_vabsd_b(_1, _2); }
1695673c5308Schenli // CHECK-LABEL: @vabsd_h(
1696673c5308Schenli // CHECK-NEXT:  entry:
1697*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1698*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1699*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vabsd.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1700*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1701*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1702673c5308Schenli //
vabsd_h(v8i16 _1,v8i16 _2)1703673c5308Schenli v8i16 vabsd_h(v8i16 _1, v8i16 _2) { return __lsx_vabsd_h(_1, _2); }
1704673c5308Schenli // CHECK-LABEL: @vabsd_w(
1705673c5308Schenli // CHECK-NEXT:  entry:
1706*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1707*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1708*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vabsd.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1709*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1710*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1711673c5308Schenli //
vabsd_w(v4i32 _1,v4i32 _2)1712673c5308Schenli v4i32 vabsd_w(v4i32 _1, v4i32 _2) { return __lsx_vabsd_w(_1, _2); }
1713673c5308Schenli // CHECK-LABEL: @vabsd_d(
1714673c5308Schenli // CHECK-NEXT:  entry:
1715*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1716*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1717*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vabsd.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1718*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1719*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1720673c5308Schenli //
vabsd_d(v2i64 _1,v2i64 _2)1721673c5308Schenli v2i64 vabsd_d(v2i64 _1, v2i64 _2) { return __lsx_vabsd_d(_1, _2); }
1722673c5308Schenli // CHECK-LABEL: @vabsd_bu(
1723673c5308Schenli // CHECK-NEXT:  entry:
1724*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1725*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1726*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vabsd.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1727*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1728*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1729673c5308Schenli //
vabsd_bu(v16u8 _1,v16u8 _2)1730673c5308Schenli v16u8 vabsd_bu(v16u8 _1, v16u8 _2) { return __lsx_vabsd_bu(_1, _2); }
1731673c5308Schenli // CHECK-LABEL: @vabsd_hu(
1732673c5308Schenli // CHECK-NEXT:  entry:
1733*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1734*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1735*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vabsd.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1736*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1737*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1738673c5308Schenli //
vabsd_hu(v8u16 _1,v8u16 _2)1739673c5308Schenli v8u16 vabsd_hu(v8u16 _1, v8u16 _2) { return __lsx_vabsd_hu(_1, _2); }
1740673c5308Schenli // CHECK-LABEL: @vabsd_wu(
1741673c5308Schenli // CHECK-NEXT:  entry:
1742*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1743*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1744*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vabsd.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1745*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1746*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1747673c5308Schenli //
vabsd_wu(v4u32 _1,v4u32 _2)1748673c5308Schenli v4u32 vabsd_wu(v4u32 _1, v4u32 _2) { return __lsx_vabsd_wu(_1, _2); }
1749673c5308Schenli // CHECK-LABEL: @vabsd_du(
1750673c5308Schenli // CHECK-NEXT:  entry:
1751*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1752*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1753*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vabsd.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1754*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1755*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1756673c5308Schenli //
vabsd_du(v2u64 _1,v2u64 _2)1757673c5308Schenli v2u64 vabsd_du(v2u64 _1, v2u64 _2) { return __lsx_vabsd_du(_1, _2); }
1758673c5308Schenli // CHECK-LABEL: @vmul_b(
1759673c5308Schenli // CHECK-NEXT:  entry:
1760*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1761*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1762*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmul.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1763*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1764*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1765673c5308Schenli //
vmul_b(v16i8 _1,v16i8 _2)1766673c5308Schenli v16i8 vmul_b(v16i8 _1, v16i8 _2) { return __lsx_vmul_b(_1, _2); }
1767673c5308Schenli // CHECK-LABEL: @vmul_h(
1768673c5308Schenli // CHECK-NEXT:  entry:
1769*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1770*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1771*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmul.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1772*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1773*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1774673c5308Schenli //
vmul_h(v8i16 _1,v8i16 _2)1775673c5308Schenli v8i16 vmul_h(v8i16 _1, v8i16 _2) { return __lsx_vmul_h(_1, _2); }
1776673c5308Schenli // CHECK-LABEL: @vmul_w(
1777673c5308Schenli // CHECK-NEXT:  entry:
1778*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1779*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1780*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmul.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1781*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1782*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1783673c5308Schenli //
vmul_w(v4i32 _1,v4i32 _2)1784673c5308Schenli v4i32 vmul_w(v4i32 _1, v4i32 _2) { return __lsx_vmul_w(_1, _2); }
1785673c5308Schenli // CHECK-LABEL: @vmul_d(
1786673c5308Schenli // CHECK-NEXT:  entry:
1787*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1788*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1789*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmul.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1790*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1791*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1792673c5308Schenli //
vmul_d(v2i64 _1,v2i64 _2)1793673c5308Schenli v2i64 vmul_d(v2i64 _1, v2i64 _2) { return __lsx_vmul_d(_1, _2); }
1794673c5308Schenli // CHECK-LABEL: @vmadd_b(
1795673c5308Schenli // CHECK-NEXT:  entry:
1796*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1797*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1798*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <16 x i8>
1799*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmadd.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
1800*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
1801*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
1802673c5308Schenli //
vmadd_b(v16i8 _1,v16i8 _2,v16i8 _3)1803673c5308Schenli v16i8 vmadd_b(v16i8 _1, v16i8 _2, v16i8 _3) {
1804673c5308Schenli   return __lsx_vmadd_b(_1, _2, _3);
1805673c5308Schenli }
1806673c5308Schenli // CHECK-LABEL: @vmadd_h(
1807673c5308Schenli // CHECK-NEXT:  entry:
1808*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1809*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1810*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <8 x i16>
1811*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmadd.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
1812*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
1813*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
1814673c5308Schenli //
vmadd_h(v8i16 _1,v8i16 _2,v8i16 _3)1815673c5308Schenli v8i16 vmadd_h(v8i16 _1, v8i16 _2, v8i16 _3) {
1816673c5308Schenli   return __lsx_vmadd_h(_1, _2, _3);
1817673c5308Schenli }
1818673c5308Schenli // CHECK-LABEL: @vmadd_w(
1819673c5308Schenli // CHECK-NEXT:  entry:
1820*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1821*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1822*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x i32>
1823*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmadd.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], <4 x i32> [[TMP2]])
1824*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
1825*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
1826673c5308Schenli //
vmadd_w(v4i32 _1,v4i32 _2,v4i32 _3)1827673c5308Schenli v4i32 vmadd_w(v4i32 _1, v4i32 _2, v4i32 _3) {
1828673c5308Schenli   return __lsx_vmadd_w(_1, _2, _3);
1829673c5308Schenli }
1830673c5308Schenli // CHECK-LABEL: @vmadd_d(
1831673c5308Schenli // CHECK-NEXT:  entry:
1832*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1833*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1834*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x i64>
1835*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmadd.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i64> [[TMP2]])
1836*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
1837*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
1838673c5308Schenli //
vmadd_d(v2i64 _1,v2i64 _2,v2i64 _3)1839673c5308Schenli v2i64 vmadd_d(v2i64 _1, v2i64 _2, v2i64 _3) {
1840673c5308Schenli   return __lsx_vmadd_d(_1, _2, _3);
1841673c5308Schenli }
1842673c5308Schenli // CHECK-LABEL: @vmsub_b(
1843673c5308Schenli // CHECK-NEXT:  entry:
1844*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1845*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1846*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <16 x i8>
1847*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmsub.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
1848*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
1849*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
1850673c5308Schenli //
vmsub_b(v16i8 _1,v16i8 _2,v16i8 _3)1851673c5308Schenli v16i8 vmsub_b(v16i8 _1, v16i8 _2, v16i8 _3) {
1852673c5308Schenli   return __lsx_vmsub_b(_1, _2, _3);
1853673c5308Schenli }
1854673c5308Schenli // CHECK-LABEL: @vmsub_h(
1855673c5308Schenli // CHECK-NEXT:  entry:
1856*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1857*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1858*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <8 x i16>
1859*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmsub.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
1860*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
1861*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
1862673c5308Schenli //
vmsub_h(v8i16 _1,v8i16 _2,v8i16 _3)1863673c5308Schenli v8i16 vmsub_h(v8i16 _1, v8i16 _2, v8i16 _3) {
1864673c5308Schenli   return __lsx_vmsub_h(_1, _2, _3);
1865673c5308Schenli }
1866673c5308Schenli // CHECK-LABEL: @vmsub_w(
1867673c5308Schenli // CHECK-NEXT:  entry:
1868*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1869*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1870*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x i32>
1871*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmsub.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], <4 x i32> [[TMP2]])
1872*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
1873*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
1874673c5308Schenli //
vmsub_w(v4i32 _1,v4i32 _2,v4i32 _3)1875673c5308Schenli v4i32 vmsub_w(v4i32 _1, v4i32 _2, v4i32 _3) {
1876673c5308Schenli   return __lsx_vmsub_w(_1, _2, _3);
1877673c5308Schenli }
1878673c5308Schenli // CHECK-LABEL: @vmsub_d(
1879673c5308Schenli // CHECK-NEXT:  entry:
1880*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1881*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1882*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x i64>
1883*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmsub.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i64> [[TMP2]])
1884*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
1885*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
1886673c5308Schenli //
vmsub_d(v2i64 _1,v2i64 _2,v2i64 _3)1887673c5308Schenli v2i64 vmsub_d(v2i64 _1, v2i64 _2, v2i64 _3) {
1888673c5308Schenli   return __lsx_vmsub_d(_1, _2, _3);
1889673c5308Schenli }
1890673c5308Schenli // CHECK-LABEL: @vdiv_b(
1891673c5308Schenli // CHECK-NEXT:  entry:
1892*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1893*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1894*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vdiv.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1895*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1896*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1897673c5308Schenli //
vdiv_b(v16i8 _1,v16i8 _2)1898673c5308Schenli v16i8 vdiv_b(v16i8 _1, v16i8 _2) { return __lsx_vdiv_b(_1, _2); }
1899673c5308Schenli // CHECK-LABEL: @vdiv_h(
1900673c5308Schenli // CHECK-NEXT:  entry:
1901*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1902*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1903*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vdiv.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1904*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1905*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1906673c5308Schenli //
vdiv_h(v8i16 _1,v8i16 _2)1907673c5308Schenli v8i16 vdiv_h(v8i16 _1, v8i16 _2) { return __lsx_vdiv_h(_1, _2); }
1908673c5308Schenli // CHECK-LABEL: @vdiv_w(
1909673c5308Schenli // CHECK-NEXT:  entry:
1910*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1911*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1912*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vdiv.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1913*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1914*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1915673c5308Schenli //
vdiv_w(v4i32 _1,v4i32 _2)1916673c5308Schenli v4i32 vdiv_w(v4i32 _1, v4i32 _2) { return __lsx_vdiv_w(_1, _2); }
1917673c5308Schenli // CHECK-LABEL: @vdiv_d(
1918673c5308Schenli // CHECK-NEXT:  entry:
1919*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1920*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1921*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vdiv.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1922*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1923*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1924673c5308Schenli //
vdiv_d(v2i64 _1,v2i64 _2)1925673c5308Schenli v2i64 vdiv_d(v2i64 _1, v2i64 _2) { return __lsx_vdiv_d(_1, _2); }
1926673c5308Schenli // CHECK-LABEL: @vdiv_bu(
1927673c5308Schenli // CHECK-NEXT:  entry:
1928*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1929*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1930*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vdiv.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1931*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
1932*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1933673c5308Schenli //
vdiv_bu(v16u8 _1,v16u8 _2)1934673c5308Schenli v16u8 vdiv_bu(v16u8 _1, v16u8 _2) { return __lsx_vdiv_bu(_1, _2); }
1935673c5308Schenli // CHECK-LABEL: @vdiv_hu(
1936673c5308Schenli // CHECK-NEXT:  entry:
1937*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1938*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1939*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vdiv.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1940*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1941*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1942673c5308Schenli //
vdiv_hu(v8u16 _1,v8u16 _2)1943673c5308Schenli v8u16 vdiv_hu(v8u16 _1, v8u16 _2) { return __lsx_vdiv_hu(_1, _2); }
1944673c5308Schenli // CHECK-LABEL: @vdiv_wu(
1945673c5308Schenli // CHECK-NEXT:  entry:
1946*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1947*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1948*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vdiv.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1949*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1950*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1951673c5308Schenli //
vdiv_wu(v4u32 _1,v4u32 _2)1952673c5308Schenli v4u32 vdiv_wu(v4u32 _1, v4u32 _2) { return __lsx_vdiv_wu(_1, _2); }
1953673c5308Schenli // CHECK-LABEL: @vdiv_du(
1954673c5308Schenli // CHECK-NEXT:  entry:
1955*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
1956*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
1957*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vdiv.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
1958*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1959*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1960673c5308Schenli //
vdiv_du(v2u64 _1,v2u64 _2)1961673c5308Schenli v2u64 vdiv_du(v2u64 _1, v2u64 _2) { return __lsx_vdiv_du(_1, _2); }
1962673c5308Schenli // CHECK-LABEL: @vhaddw_h_b(
1963673c5308Schenli // CHECK-NEXT:  entry:
1964*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1965*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1966*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vhaddw.h.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1967*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1968*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1969673c5308Schenli //
vhaddw_h_b(v16i8 _1,v16i8 _2)1970673c5308Schenli v8i16 vhaddw_h_b(v16i8 _1, v16i8 _2) { return __lsx_vhaddw_h_b(_1, _2); }
1971673c5308Schenli // CHECK-LABEL: @vhaddw_w_h(
1972673c5308Schenli // CHECK-NEXT:  entry:
1973*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
1974*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
1975*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vhaddw.w.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
1976*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
1977*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1978673c5308Schenli //
vhaddw_w_h(v8i16 _1,v8i16 _2)1979673c5308Schenli v4i32 vhaddw_w_h(v8i16 _1, v8i16 _2) { return __lsx_vhaddw_w_h(_1, _2); }
1980673c5308Schenli // CHECK-LABEL: @vhaddw_d_w(
1981673c5308Schenli // CHECK-NEXT:  entry:
1982*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
1983*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
1984*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhaddw.d.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
1985*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
1986*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1987673c5308Schenli //
vhaddw_d_w(v4i32 _1,v4i32 _2)1988673c5308Schenli v2i64 vhaddw_d_w(v4i32 _1, v4i32 _2) { return __lsx_vhaddw_d_w(_1, _2); }
1989673c5308Schenli // CHECK-LABEL: @vhaddw_hu_bu(
1990673c5308Schenli // CHECK-NEXT:  entry:
1991*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
1992*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
1993*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vhaddw.hu.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
1994*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
1995*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
1996673c5308Schenli //
vhaddw_hu_bu(v16u8 _1,v16u8 _2)1997673c5308Schenli v8u16 vhaddw_hu_bu(v16u8 _1, v16u8 _2) { return __lsx_vhaddw_hu_bu(_1, _2); }
1998673c5308Schenli // CHECK-LABEL: @vhaddw_wu_hu(
1999673c5308Schenli // CHECK-NEXT:  entry:
2000*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2001*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
2002*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vhaddw.wu.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
2003*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
2004*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2005673c5308Schenli //
vhaddw_wu_hu(v8u16 _1,v8u16 _2)2006673c5308Schenli v4u32 vhaddw_wu_hu(v8u16 _1, v8u16 _2) { return __lsx_vhaddw_wu_hu(_1, _2); }
2007673c5308Schenli // CHECK-LABEL: @vhaddw_du_wu(
2008673c5308Schenli // CHECK-NEXT:  entry:
2009*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2010*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
2011*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhaddw.du.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
2012*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
2013*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2014673c5308Schenli //
vhaddw_du_wu(v4u32 _1,v4u32 _2)2015673c5308Schenli v2u64 vhaddw_du_wu(v4u32 _1, v4u32 _2) { return __lsx_vhaddw_du_wu(_1, _2); }
2016673c5308Schenli // CHECK-LABEL: @vhsubw_h_b(
2017673c5308Schenli // CHECK-NEXT:  entry:
2018*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2019*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2020*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vhsubw.h.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2021*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
2022*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2023673c5308Schenli //
vhsubw_h_b(v16i8 _1,v16i8 _2)2024673c5308Schenli v8i16 vhsubw_h_b(v16i8 _1, v16i8 _2) { return __lsx_vhsubw_h_b(_1, _2); }
2025673c5308Schenli // CHECK-LABEL: @vhsubw_w_h(
2026673c5308Schenli // CHECK-NEXT:  entry:
2027*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2028*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
2029*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vhsubw.w.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
2030*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
2031*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2032673c5308Schenli //
vhsubw_w_h(v8i16 _1,v8i16 _2)2033673c5308Schenli v4i32 vhsubw_w_h(v8i16 _1, v8i16 _2) { return __lsx_vhsubw_w_h(_1, _2); }
2034673c5308Schenli // CHECK-LABEL: @vhsubw_d_w(
2035673c5308Schenli // CHECK-NEXT:  entry:
2036*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2037*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
2038*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhsubw.d.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
2039*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
2040*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2041673c5308Schenli //
vhsubw_d_w(v4i32 _1,v4i32 _2)2042673c5308Schenli v2i64 vhsubw_d_w(v4i32 _1, v4i32 _2) { return __lsx_vhsubw_d_w(_1, _2); }
2043673c5308Schenli // CHECK-LABEL: @vhsubw_hu_bu(
2044673c5308Schenli // CHECK-NEXT:  entry:
2045*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2046*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2047*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vhsubw.hu.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2048*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
2049*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2050673c5308Schenli //
vhsubw_hu_bu(v16u8 _1,v16u8 _2)2051673c5308Schenli v8i16 vhsubw_hu_bu(v16u8 _1, v16u8 _2) { return __lsx_vhsubw_hu_bu(_1, _2); }
2052673c5308Schenli // CHECK-LABEL: @vhsubw_wu_hu(
2053673c5308Schenli // CHECK-NEXT:  entry:
2054*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2055*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
2056*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vhsubw.wu.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
2057*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
2058*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2059673c5308Schenli //
vhsubw_wu_hu(v8u16 _1,v8u16 _2)2060673c5308Schenli v4i32 vhsubw_wu_hu(v8u16 _1, v8u16 _2) { return __lsx_vhsubw_wu_hu(_1, _2); }
2061673c5308Schenli // CHECK-LABEL: @vhsubw_du_wu(
2062673c5308Schenli // CHECK-NEXT:  entry:
2063*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2064*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
2065*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhsubw.du.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
2066*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
2067*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2068673c5308Schenli //
vhsubw_du_wu(v4u32 _1,v4u32 _2)2069673c5308Schenli v2i64 vhsubw_du_wu(v4u32 _1, v4u32 _2) { return __lsx_vhsubw_du_wu(_1, _2); }
2070673c5308Schenli // CHECK-LABEL: @vmod_b(
2071673c5308Schenli // CHECK-NEXT:  entry:
2072*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2073*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2074*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmod.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2075*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2076*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2077673c5308Schenli //
vmod_b(v16i8 _1,v16i8 _2)2078673c5308Schenli v16i8 vmod_b(v16i8 _1, v16i8 _2) { return __lsx_vmod_b(_1, _2); }
2079673c5308Schenli // CHECK-LABEL: @vmod_h(
2080673c5308Schenli // CHECK-NEXT:  entry:
2081*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2082*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
2083*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmod.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
2084*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
2085*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2086673c5308Schenli //
vmod_h(v8i16 _1,v8i16 _2)2087673c5308Schenli v8i16 vmod_h(v8i16 _1, v8i16 _2) { return __lsx_vmod_h(_1, _2); }
2088673c5308Schenli // CHECK-LABEL: @vmod_w(
2089673c5308Schenli // CHECK-NEXT:  entry:
2090*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2091*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
2092*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmod.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
2093*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
2094*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2095673c5308Schenli //
vmod_w(v4i32 _1,v4i32 _2)2096673c5308Schenli v4i32 vmod_w(v4i32 _1, v4i32 _2) { return __lsx_vmod_w(_1, _2); }
2097673c5308Schenli // CHECK-LABEL: @vmod_d(
2098673c5308Schenli // CHECK-NEXT:  entry:
2099*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2100*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
2101*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmod.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
2102*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
2103*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2104673c5308Schenli //
vmod_d(v2i64 _1,v2i64 _2)2105673c5308Schenli v2i64 vmod_d(v2i64 _1, v2i64 _2) { return __lsx_vmod_d(_1, _2); }
2106673c5308Schenli // CHECK-LABEL: @vmod_bu(
2107673c5308Schenli // CHECK-NEXT:  entry:
2108*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2109*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2110*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmod.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2111*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2112*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2113673c5308Schenli //
vmod_bu(v16u8 _1,v16u8 _2)2114673c5308Schenli v16u8 vmod_bu(v16u8 _1, v16u8 _2) { return __lsx_vmod_bu(_1, _2); }
2115673c5308Schenli // CHECK-LABEL: @vmod_hu(
2116673c5308Schenli // CHECK-NEXT:  entry:
2117*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2118*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
2119*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmod.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
2120*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
2121*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2122673c5308Schenli //
vmod_hu(v8u16 _1,v8u16 _2)2123673c5308Schenli v8u16 vmod_hu(v8u16 _1, v8u16 _2) { return __lsx_vmod_hu(_1, _2); }
2124673c5308Schenli // CHECK-LABEL: @vmod_wu(
2125673c5308Schenli // CHECK-NEXT:  entry:
2126*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2127*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
2128*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmod.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
2129*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
2130*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2131673c5308Schenli //
vmod_wu(v4u32 _1,v4u32 _2)2132673c5308Schenli v4u32 vmod_wu(v4u32 _1, v4u32 _2) { return __lsx_vmod_wu(_1, _2); }
2133673c5308Schenli // CHECK-LABEL: @vmod_du(
2134673c5308Schenli // CHECK-NEXT:  entry:
2135*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2136*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
2137*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmod.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
2138*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
2139*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2140673c5308Schenli //
vmod_du(v2u64 _1,v2u64 _2)2141673c5308Schenli v2u64 vmod_du(v2u64 _1, v2u64 _2) { return __lsx_vmod_du(_1, _2); }
2142673c5308Schenli // CHECK-LABEL: @vreplve_b(
2143673c5308Schenli // CHECK-NEXT:  entry:
2144*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2145*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vreplve.b(<16 x i8> [[TMP0]], i32 [[_2:%.*]])
2146*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
2147*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2148673c5308Schenli //
vreplve_b(v16i8 _1,int _2)2149673c5308Schenli v16i8 vreplve_b(v16i8 _1, int _2) { return __lsx_vreplve_b(_1, _2); }
2150673c5308Schenli // CHECK-LABEL: @vreplve_h(
2151673c5308Schenli // CHECK-NEXT:  entry:
2152*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2153*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vreplve.h(<8 x i16> [[TMP0]], i32 [[_2:%.*]])
2154*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
2155*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2156673c5308Schenli //
vreplve_h(v8i16 _1,int _2)2157673c5308Schenli v8i16 vreplve_h(v8i16 _1, int _2) { return __lsx_vreplve_h(_1, _2); }
2158673c5308Schenli // CHECK-LABEL: @vreplve_w(
2159673c5308Schenli // CHECK-NEXT:  entry:
2160*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2161*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vreplve.w(<4 x i32> [[TMP0]], i32 [[_2:%.*]])
2162*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
2163*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2164673c5308Schenli //
vreplve_w(v4i32 _1,int _2)2165673c5308Schenli v4i32 vreplve_w(v4i32 _1, int _2) { return __lsx_vreplve_w(_1, _2); }
2166673c5308Schenli // CHECK-LABEL: @vreplve_d(
2167673c5308Schenli // CHECK-NEXT:  entry:
2168*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2169*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vreplve.d(<2 x i64> [[TMP0]], i32 [[_2:%.*]])
2170*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
2171*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2172673c5308Schenli //
vreplve_d(v2i64 _1,int _2)2173673c5308Schenli v2i64 vreplve_d(v2i64 _1, int _2) { return __lsx_vreplve_d(_1, _2); }
2174673c5308Schenli // CHECK-LABEL: @vreplvei_b(
2175673c5308Schenli // CHECK-NEXT:  entry:
2176*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2177*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vreplvei.b(<16 x i8> [[TMP0]], i32 1)
2178*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
2179*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2180673c5308Schenli //
vreplvei_b(v16i8 _1)2181673c5308Schenli v16i8 vreplvei_b(v16i8 _1) { return __lsx_vreplvei_b(_1, 1); }
2182673c5308Schenli // CHECK-LABEL: @vreplvei_h(
2183673c5308Schenli // CHECK-NEXT:  entry:
2184*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2185*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vreplvei.h(<8 x i16> [[TMP0]], i32 1)
2186*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
2187*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2188673c5308Schenli //
vreplvei_h(v8i16 _1)2189673c5308Schenli v8i16 vreplvei_h(v8i16 _1) { return __lsx_vreplvei_h(_1, 1); }
2190673c5308Schenli // CHECK-LABEL: @vreplvei_w(
2191673c5308Schenli // CHECK-NEXT:  entry:
2192*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2193*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vreplvei.w(<4 x i32> [[TMP0]], i32 1)
2194*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
2195*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2196673c5308Schenli //
vreplvei_w(v4i32 _1)2197673c5308Schenli v4i32 vreplvei_w(v4i32 _1) { return __lsx_vreplvei_w(_1, 1); }
2198673c5308Schenli // CHECK-LABEL: @vreplvei_d(
2199673c5308Schenli // CHECK-NEXT:  entry:
2200*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2201*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vreplvei.d(<2 x i64> [[TMP0]], i32 1)
2202*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
2203*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2204673c5308Schenli //
vreplvei_d(v2i64 _1)2205673c5308Schenli v2i64 vreplvei_d(v2i64 _1) { return __lsx_vreplvei_d(_1, 1); }
2206673c5308Schenli // CHECK-LABEL: @vpickev_b(
2207673c5308Schenli // CHECK-NEXT:  entry:
2208*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2209*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2210*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vpickev.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2211*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2212*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2213673c5308Schenli //
vpickev_b(v16i8 _1,v16i8 _2)2214673c5308Schenli v16i8 vpickev_b(v16i8 _1, v16i8 _2) { return __lsx_vpickev_b(_1, _2); }
2215673c5308Schenli // CHECK-LABEL: @vpickev_h(
2216673c5308Schenli // CHECK-NEXT:  entry:
2217*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2218*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
2219*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vpickev.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
2220*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
2221*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2222673c5308Schenli //
vpickev_h(v8i16 _1,v8i16 _2)2223673c5308Schenli v8i16 vpickev_h(v8i16 _1, v8i16 _2) { return __lsx_vpickev_h(_1, _2); }
2224673c5308Schenli // CHECK-LABEL: @vpickev_w(
2225673c5308Schenli // CHECK-NEXT:  entry:
2226*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2227*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
2228*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vpickev.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
2229*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
2230*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2231673c5308Schenli //
vpickev_w(v4i32 _1,v4i32 _2)2232673c5308Schenli v4i32 vpickev_w(v4i32 _1, v4i32 _2) { return __lsx_vpickev_w(_1, _2); }
2233673c5308Schenli // CHECK-LABEL: @vpickev_d(
2234673c5308Schenli // CHECK-NEXT:  entry:
2235*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2236*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
2237*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vpickev.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
2238*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
2239*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2240673c5308Schenli //
vpickev_d(v2i64 _1,v2i64 _2)2241673c5308Schenli v2i64 vpickev_d(v2i64 _1, v2i64 _2) { return __lsx_vpickev_d(_1, _2); }
2242673c5308Schenli // CHECK-LABEL: @vpickod_b(
2243673c5308Schenli // CHECK-NEXT:  entry:
2244*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2245*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2246*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vpickod.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2247*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2248*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2249673c5308Schenli //
vpickod_b(v16i8 _1,v16i8 _2)2250673c5308Schenli v16i8 vpickod_b(v16i8 _1, v16i8 _2) { return __lsx_vpickod_b(_1, _2); }
2251673c5308Schenli // CHECK-LABEL: @vpickod_h(
2252673c5308Schenli // CHECK-NEXT:  entry:
2253*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2254*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
2255*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vpickod.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
2256*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
2257*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2258673c5308Schenli //
vpickod_h(v8i16 _1,v8i16 _2)2259673c5308Schenli v8i16 vpickod_h(v8i16 _1, v8i16 _2) { return __lsx_vpickod_h(_1, _2); }
2260673c5308Schenli // CHECK-LABEL: @vpickod_w(
2261673c5308Schenli // CHECK-NEXT:  entry:
2262*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2263*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
2264*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vpickod.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
2265*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
2266*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2267673c5308Schenli //
vpickod_w(v4i32 _1,v4i32 _2)2268673c5308Schenli v4i32 vpickod_w(v4i32 _1, v4i32 _2) { return __lsx_vpickod_w(_1, _2); }
2269673c5308Schenli // CHECK-LABEL: @vpickod_d(
2270673c5308Schenli // CHECK-NEXT:  entry:
2271*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2272*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
2273*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vpickod.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
2274*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
2275*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2276673c5308Schenli //
vpickod_d(v2i64 _1,v2i64 _2)2277673c5308Schenli v2i64 vpickod_d(v2i64 _1, v2i64 _2) { return __lsx_vpickod_d(_1, _2); }
2278673c5308Schenli // CHECK-LABEL: @vilvh_b(
2279673c5308Schenli // CHECK-NEXT:  entry:
2280*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2281*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2282*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vilvh.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2283*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2284*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2285673c5308Schenli //
vilvh_b(v16i8 _1,v16i8 _2)2286673c5308Schenli v16i8 vilvh_b(v16i8 _1, v16i8 _2) { return __lsx_vilvh_b(_1, _2); }
2287673c5308Schenli // CHECK-LABEL: @vilvh_h(
2288673c5308Schenli // CHECK-NEXT:  entry:
2289*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2290*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
2291*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vilvh.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
2292*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
2293*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2294673c5308Schenli //
vilvh_h(v8i16 _1,v8i16 _2)2295673c5308Schenli v8i16 vilvh_h(v8i16 _1, v8i16 _2) { return __lsx_vilvh_h(_1, _2); }
2296673c5308Schenli // CHECK-LABEL: @vilvh_w(
2297673c5308Schenli // CHECK-NEXT:  entry:
2298*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2299*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
2300*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vilvh.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
2301*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
2302*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2303673c5308Schenli //
vilvh_w(v4i32 _1,v4i32 _2)2304673c5308Schenli v4i32 vilvh_w(v4i32 _1, v4i32 _2) { return __lsx_vilvh_w(_1, _2); }
2305673c5308Schenli // CHECK-LABEL: @vilvh_d(
2306673c5308Schenli // CHECK-NEXT:  entry:
2307*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2308*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
2309*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vilvh.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
2310*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
2311*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2312673c5308Schenli //
vilvh_d(v2i64 _1,v2i64 _2)2313673c5308Schenli v2i64 vilvh_d(v2i64 _1, v2i64 _2) { return __lsx_vilvh_d(_1, _2); }
2314673c5308Schenli // CHECK-LABEL: @vilvl_b(
2315673c5308Schenli // CHECK-NEXT:  entry:
2316*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2317*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2318*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vilvl.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2319*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2320*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2321673c5308Schenli //
vilvl_b(v16i8 _1,v16i8 _2)2322673c5308Schenli v16i8 vilvl_b(v16i8 _1, v16i8 _2) { return __lsx_vilvl_b(_1, _2); }
2323673c5308Schenli // CHECK-LABEL: @vilvl_h(
2324673c5308Schenli // CHECK-NEXT:  entry:
2325*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2326*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
2327*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vilvl.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
2328*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
2329*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2330673c5308Schenli //
vilvl_h(v8i16 _1,v8i16 _2)2331673c5308Schenli v8i16 vilvl_h(v8i16 _1, v8i16 _2) { return __lsx_vilvl_h(_1, _2); }
2332673c5308Schenli // CHECK-LABEL: @vilvl_w(
2333673c5308Schenli // CHECK-NEXT:  entry:
2334*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2335*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
2336*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vilvl.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
2337*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
2338*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2339673c5308Schenli //
vilvl_w(v4i32 _1,v4i32 _2)2340673c5308Schenli v4i32 vilvl_w(v4i32 _1, v4i32 _2) { return __lsx_vilvl_w(_1, _2); }
2341673c5308Schenli // CHECK-LABEL: @vilvl_d(
2342673c5308Schenli // CHECK-NEXT:  entry:
2343*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2344*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
2345*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vilvl.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
2346*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
2347*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2348673c5308Schenli //
vilvl_d(v2i64 _1,v2i64 _2)2349673c5308Schenli v2i64 vilvl_d(v2i64 _1, v2i64 _2) { return __lsx_vilvl_d(_1, _2); }
2350673c5308Schenli // CHECK-LABEL: @vpackev_b(
2351673c5308Schenli // CHECK-NEXT:  entry:
2352*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2353*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2354*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vpackev.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2355*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2356*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2357673c5308Schenli //
vpackev_b(v16i8 _1,v16i8 _2)2358673c5308Schenli v16i8 vpackev_b(v16i8 _1, v16i8 _2) { return __lsx_vpackev_b(_1, _2); }
2359673c5308Schenli // CHECK-LABEL: @vpackev_h(
2360673c5308Schenli // CHECK-NEXT:  entry:
2361*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2362*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
2363*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vpackev.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
2364*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
2365*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2366673c5308Schenli //
vpackev_h(v8i16 _1,v8i16 _2)2367673c5308Schenli v8i16 vpackev_h(v8i16 _1, v8i16 _2) { return __lsx_vpackev_h(_1, _2); }
2368673c5308Schenli // CHECK-LABEL: @vpackev_w(
2369673c5308Schenli // CHECK-NEXT:  entry:
2370*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2371*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
2372*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vpackev.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
2373*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
2374*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2375673c5308Schenli //
vpackev_w(v4i32 _1,v4i32 _2)2376673c5308Schenli v4i32 vpackev_w(v4i32 _1, v4i32 _2) { return __lsx_vpackev_w(_1, _2); }
2377673c5308Schenli // CHECK-LABEL: @vpackev_d(
2378673c5308Schenli // CHECK-NEXT:  entry:
2379*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2380*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
2381*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vpackev.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
2382*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
2383*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2384673c5308Schenli //
vpackev_d(v2i64 _1,v2i64 _2)2385673c5308Schenli v2i64 vpackev_d(v2i64 _1, v2i64 _2) { return __lsx_vpackev_d(_1, _2); }
2386673c5308Schenli // CHECK-LABEL: @vpackod_b(
2387673c5308Schenli // CHECK-NEXT:  entry:
2388*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2389*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2390*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vpackod.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2391*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2392*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2393673c5308Schenli //
vpackod_b(v16i8 _1,v16i8 _2)2394673c5308Schenli v16i8 vpackod_b(v16i8 _1, v16i8 _2) { return __lsx_vpackod_b(_1, _2); }
2395673c5308Schenli // CHECK-LABEL: @vpackod_h(
2396673c5308Schenli // CHECK-NEXT:  entry:
2397*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2398*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
2399*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vpackod.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
2400*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
2401*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2402673c5308Schenli //
vpackod_h(v8i16 _1,v8i16 _2)2403673c5308Schenli v8i16 vpackod_h(v8i16 _1, v8i16 _2) { return __lsx_vpackod_h(_1, _2); }
2404673c5308Schenli // CHECK-LABEL: @vpackod_w(
2405673c5308Schenli // CHECK-NEXT:  entry:
2406*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2407*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
2408*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vpackod.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
2409*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
2410*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2411673c5308Schenli //
vpackod_w(v4i32 _1,v4i32 _2)2412673c5308Schenli v4i32 vpackod_w(v4i32 _1, v4i32 _2) { return __lsx_vpackod_w(_1, _2); }
2413673c5308Schenli // CHECK-LABEL: @vpackod_d(
2414673c5308Schenli // CHECK-NEXT:  entry:
2415*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2416*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
2417*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vpackod.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
2418*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
2419*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2420673c5308Schenli //
vpackod_d(v2i64 _1,v2i64 _2)2421673c5308Schenli v2i64 vpackod_d(v2i64 _1, v2i64 _2) { return __lsx_vpackod_d(_1, _2); }
2422673c5308Schenli // CHECK-LABEL: @vshuf_h(
2423673c5308Schenli // CHECK-NEXT:  entry:
2424*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2425*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
2426*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <8 x i16>
2427*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vshuf.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
2428*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
2429*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
2430673c5308Schenli //
vshuf_h(v8i16 _1,v8i16 _2,v8i16 _3)2431673c5308Schenli v8i16 vshuf_h(v8i16 _1, v8i16 _2, v8i16 _3) {
2432673c5308Schenli   return __lsx_vshuf_h(_1, _2, _3);
2433673c5308Schenli }
2434673c5308Schenli // CHECK-LABEL: @vshuf_w(
2435673c5308Schenli // CHECK-NEXT:  entry:
2436*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2437*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
2438*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x i32>
2439*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vshuf.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], <4 x i32> [[TMP2]])
2440*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
2441*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
2442673c5308Schenli //
vshuf_w(v4i32 _1,v4i32 _2,v4i32 _3)2443673c5308Schenli v4i32 vshuf_w(v4i32 _1, v4i32 _2, v4i32 _3) {
2444673c5308Schenli   return __lsx_vshuf_w(_1, _2, _3);
2445673c5308Schenli }
2446673c5308Schenli // CHECK-LABEL: @vshuf_d(
2447673c5308Schenli // CHECK-NEXT:  entry:
2448*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2449*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
2450*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x i64>
2451*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vshuf.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i64> [[TMP2]])
2452*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
2453*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
2454673c5308Schenli //
vshuf_d(v2i64 _1,v2i64 _2,v2i64 _3)2455673c5308Schenli v2i64 vshuf_d(v2i64 _1, v2i64 _2, v2i64 _3) {
2456673c5308Schenli   return __lsx_vshuf_d(_1, _2, _3);
2457673c5308Schenli }
2458673c5308Schenli // CHECK-LABEL: @vand_v(
2459673c5308Schenli // CHECK-NEXT:  entry:
2460*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2461*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2462*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vand.v(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2463*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2464*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2465673c5308Schenli //
vand_v(v16u8 _1,v16u8 _2)2466673c5308Schenli v16u8 vand_v(v16u8 _1, v16u8 _2) { return __lsx_vand_v(_1, _2); }
2467673c5308Schenli // CHECK-LABEL: @vandi_b(
2468673c5308Schenli // CHECK-NEXT:  entry:
2469*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2470*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vandi.b(<16 x i8> [[TMP0]], i32 1)
2471*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
2472*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2473673c5308Schenli //
vandi_b(v16u8 _1)2474673c5308Schenli v16u8 vandi_b(v16u8 _1) { return __lsx_vandi_b(_1, 1); }
2475673c5308Schenli // CHECK-LABEL: @vor_v(
2476673c5308Schenli // CHECK-NEXT:  entry:
2477*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2478*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2479*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vor.v(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2480*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2481*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2482673c5308Schenli //
vor_v(v16u8 _1,v16u8 _2)2483673c5308Schenli v16u8 vor_v(v16u8 _1, v16u8 _2) { return __lsx_vor_v(_1, _2); }
2484673c5308Schenli // CHECK-LABEL: @vori_b(
2485673c5308Schenli // CHECK-NEXT:  entry:
2486*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2487*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vori.b(<16 x i8> [[TMP0]], i32 1)
2488*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
2489*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2490673c5308Schenli //
vori_b(v16u8 _1)2491673c5308Schenli v16u8 vori_b(v16u8 _1) { return __lsx_vori_b(_1, 1); }
2492673c5308Schenli // CHECK-LABEL: @vnor_v(
2493673c5308Schenli // CHECK-NEXT:  entry:
2494*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2495*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2496*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vnor.v(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2497*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2498*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2499673c5308Schenli //
vnor_v(v16u8 _1,v16u8 _2)2500673c5308Schenli v16u8 vnor_v(v16u8 _1, v16u8 _2) { return __lsx_vnor_v(_1, _2); }
2501673c5308Schenli // CHECK-LABEL: @vnori_b(
2502673c5308Schenli // CHECK-NEXT:  entry:
2503*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2504*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vnori.b(<16 x i8> [[TMP0]], i32 1)
2505*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
2506*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2507673c5308Schenli //
vnori_b(v16u8 _1)2508673c5308Schenli v16u8 vnori_b(v16u8 _1) { return __lsx_vnori_b(_1, 1); }
2509673c5308Schenli // CHECK-LABEL: @vxor_v(
2510673c5308Schenli // CHECK-NEXT:  entry:
2511*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2512*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2513*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vxor.v(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
2514*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2515*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2516673c5308Schenli //
vxor_v(v16u8 _1,v16u8 _2)2517673c5308Schenli v16u8 vxor_v(v16u8 _1, v16u8 _2) { return __lsx_vxor_v(_1, _2); }
2518673c5308Schenli // CHECK-LABEL: @vxori_b(
2519673c5308Schenli // CHECK-NEXT:  entry:
2520*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2521*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vxori.b(<16 x i8> [[TMP0]], i32 1)
2522*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
2523*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2524673c5308Schenli //
vxori_b(v16u8 _1)2525673c5308Schenli v16u8 vxori_b(v16u8 _1) { return __lsx_vxori_b(_1, 1); }
2526673c5308Schenli // CHECK-LABEL: @vbitsel_v(
2527673c5308Schenli // CHECK-NEXT:  entry:
2528*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2529*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2530*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <16 x i8>
2531*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitsel.v(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
2532*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
2533*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
2534673c5308Schenli //
vbitsel_v(v16u8 _1,v16u8 _2,v16u8 _3)2535673c5308Schenli v16u8 vbitsel_v(v16u8 _1, v16u8 _2, v16u8 _3) {
2536673c5308Schenli   return __lsx_vbitsel_v(_1, _2, _3);
2537673c5308Schenli }
2538673c5308Schenli // CHECK-LABEL: @vbitseli_b(
2539673c5308Schenli // CHECK-NEXT:  entry:
2540*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2541*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
2542*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitseli.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
2543*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
2544*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2545673c5308Schenli //
vbitseli_b(v16u8 _1,v16u8 _2)2546673c5308Schenli v16u8 vbitseli_b(v16u8 _1, v16u8 _2) { return __lsx_vbitseli_b(_1, _2, 1); }
2547673c5308Schenli // CHECK-LABEL: @vshuf4i_b(
2548673c5308Schenli // CHECK-NEXT:  entry:
2549*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2550*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vshuf4i.b(<16 x i8> [[TMP0]], i32 1)
2551*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
2552*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2553673c5308Schenli //
vshuf4i_b(v16i8 _1)2554673c5308Schenli v16i8 vshuf4i_b(v16i8 _1) { return __lsx_vshuf4i_b(_1, 1); }
2555673c5308Schenli // CHECK-LABEL: @vshuf4i_h(
2556673c5308Schenli // CHECK-NEXT:  entry:
2557*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2558*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vshuf4i.h(<8 x i16> [[TMP0]], i32 1)
2559*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
2560*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2561673c5308Schenli //
vshuf4i_h(v8i16 _1)2562673c5308Schenli v8i16 vshuf4i_h(v8i16 _1) { return __lsx_vshuf4i_h(_1, 1); }
2563673c5308Schenli // CHECK-LABEL: @vshuf4i_w(
2564673c5308Schenli // CHECK-NEXT:  entry:
2565*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2566*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vshuf4i.w(<4 x i32> [[TMP0]], i32 1)
2567*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
2568*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2569673c5308Schenli //
vshuf4i_w(v4i32 _1)2570673c5308Schenli v4i32 vshuf4i_w(v4i32 _1) { return __lsx_vshuf4i_w(_1, 1); }
2571673c5308Schenli // CHECK-LABEL: @vreplgr2vr_b(
2572673c5308Schenli // CHECK-NEXT:  entry:
2573673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vreplgr2vr.b(i32 [[_1:%.*]])
2574*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to i128
2575*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
2576673c5308Schenli //
vreplgr2vr_b(int _1)2577673c5308Schenli v16i8 vreplgr2vr_b(int _1) { return __lsx_vreplgr2vr_b(_1); }
2578673c5308Schenli // CHECK-LABEL: @vreplgr2vr_h(
2579673c5308Schenli // CHECK-NEXT:  entry:
2580673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vreplgr2vr.h(i32 [[_1:%.*]])
2581*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i16> [[TMP0]] to i128
2582*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
2583673c5308Schenli //
vreplgr2vr_h(int _1)2584673c5308Schenli v8i16 vreplgr2vr_h(int _1) { return __lsx_vreplgr2vr_h(_1); }
2585673c5308Schenli // CHECK-LABEL: @vreplgr2vr_w(
2586673c5308Schenli // CHECK-NEXT:  entry:
2587673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vreplgr2vr.w(i32 [[_1:%.*]])
2588*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to i128
2589*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
2590673c5308Schenli //
vreplgr2vr_w(int _1)2591673c5308Schenli v4i32 vreplgr2vr_w(int _1) { return __lsx_vreplgr2vr_w(_1); }
2592673c5308Schenli // CHECK-LABEL: @vreplgr2vr_d(
2593673c5308Schenli // CHECK-NEXT:  entry:
2594673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vreplgr2vr.d(i64 [[_1:%.*]])
2595*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to i128
2596*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
2597673c5308Schenli //
vreplgr2vr_d(long _1)2598673c5308Schenli v2i64 vreplgr2vr_d(long _1) { return __lsx_vreplgr2vr_d(_1); }
2599673c5308Schenli // CHECK-LABEL: @vpcnt_b(
2600673c5308Schenli // CHECK-NEXT:  entry:
2601*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2602*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vpcnt.b(<16 x i8> [[TMP0]])
2603*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
2604*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2605673c5308Schenli //
vpcnt_b(v16i8 _1)2606673c5308Schenli v16i8 vpcnt_b(v16i8 _1) { return __lsx_vpcnt_b(_1); }
2607673c5308Schenli // CHECK-LABEL: @vpcnt_h(
2608673c5308Schenli // CHECK-NEXT:  entry:
2609*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2610*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vpcnt.h(<8 x i16> [[TMP0]])
2611*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
2612*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2613673c5308Schenli //
vpcnt_h(v8i16 _1)2614673c5308Schenli v8i16 vpcnt_h(v8i16 _1) { return __lsx_vpcnt_h(_1); }
2615673c5308Schenli // CHECK-LABEL: @vpcnt_w(
2616673c5308Schenli // CHECK-NEXT:  entry:
2617*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2618*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vpcnt.w(<4 x i32> [[TMP0]])
2619*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
2620*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2621673c5308Schenli //
vpcnt_w(v4i32 _1)2622673c5308Schenli v4i32 vpcnt_w(v4i32 _1) { return __lsx_vpcnt_w(_1); }
2623673c5308Schenli // CHECK-LABEL: @vpcnt_d(
2624673c5308Schenli // CHECK-NEXT:  entry:
2625*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2626*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vpcnt.d(<2 x i64> [[TMP0]])
2627*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
2628*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2629673c5308Schenli //
vpcnt_d(v2i64 _1)2630673c5308Schenli v2i64 vpcnt_d(v2i64 _1) { return __lsx_vpcnt_d(_1); }
2631673c5308Schenli // CHECK-LABEL: @vclo_b(
2632673c5308Schenli // CHECK-NEXT:  entry:
2633*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2634*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vclo.b(<16 x i8> [[TMP0]])
2635*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
2636*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2637673c5308Schenli //
vclo_b(v16i8 _1)2638673c5308Schenli v16i8 vclo_b(v16i8 _1) { return __lsx_vclo_b(_1); }
2639673c5308Schenli // CHECK-LABEL: @vclo_h(
2640673c5308Schenli // CHECK-NEXT:  entry:
2641*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2642*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vclo.h(<8 x i16> [[TMP0]])
2643*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
2644*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2645673c5308Schenli //
vclo_h(v8i16 _1)2646673c5308Schenli v8i16 vclo_h(v8i16 _1) { return __lsx_vclo_h(_1); }
2647673c5308Schenli // CHECK-LABEL: @vclo_w(
2648673c5308Schenli // CHECK-NEXT:  entry:
2649*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2650*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vclo.w(<4 x i32> [[TMP0]])
2651*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
2652*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2653673c5308Schenli //
vclo_w(v4i32 _1)2654673c5308Schenli v4i32 vclo_w(v4i32 _1) { return __lsx_vclo_w(_1); }
2655673c5308Schenli // CHECK-LABEL: @vclo_d(
2656673c5308Schenli // CHECK-NEXT:  entry:
2657*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2658*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vclo.d(<2 x i64> [[TMP0]])
2659*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
2660*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2661673c5308Schenli //
vclo_d(v2i64 _1)2662673c5308Schenli v2i64 vclo_d(v2i64 _1) { return __lsx_vclo_d(_1); }
2663673c5308Schenli // CHECK-LABEL: @vclz_b(
2664673c5308Schenli // CHECK-NEXT:  entry:
2665*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2666*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vclz.b(<16 x i8> [[TMP0]])
2667*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
2668*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2669673c5308Schenli //
vclz_b(v16i8 _1)2670673c5308Schenli v16i8 vclz_b(v16i8 _1) { return __lsx_vclz_b(_1); }
2671673c5308Schenli // CHECK-LABEL: @vclz_h(
2672673c5308Schenli // CHECK-NEXT:  entry:
2673*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2674*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vclz.h(<8 x i16> [[TMP0]])
2675*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
2676*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2677673c5308Schenli //
vclz_h(v8i16 _1)2678673c5308Schenli v8i16 vclz_h(v8i16 _1) { return __lsx_vclz_h(_1); }
2679673c5308Schenli // CHECK-LABEL: @vclz_w(
2680673c5308Schenli // CHECK-NEXT:  entry:
2681*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2682*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vclz.w(<4 x i32> [[TMP0]])
2683*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
2684*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2685673c5308Schenli //
vclz_w(v4i32 _1)2686673c5308Schenli v4i32 vclz_w(v4i32 _1) { return __lsx_vclz_w(_1); }
2687673c5308Schenli // CHECK-LABEL: @vclz_d(
2688673c5308Schenli // CHECK-NEXT:  entry:
2689*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2690*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vclz.d(<2 x i64> [[TMP0]])
2691*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
2692*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2693673c5308Schenli //
vclz_d(v2i64 _1)2694673c5308Schenli v2i64 vclz_d(v2i64 _1) { return __lsx_vclz_d(_1); }
2695673c5308Schenli // CHECK-LABEL: @vpickve2gr_b(
2696673c5308Schenli // CHECK-NEXT:  entry:
2697*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2698*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.vpickve2gr.b(<16 x i8> [[TMP0]], i32 1)
2699*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
2700673c5308Schenli //
vpickve2gr_b(v16i8 _1)2701673c5308Schenli int vpickve2gr_b(v16i8 _1) { return __lsx_vpickve2gr_b(_1, 1); }
2702673c5308Schenli // CHECK-LABEL: @vpickve2gr_h(
2703673c5308Schenli // CHECK-NEXT:  entry:
2704*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2705*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.vpickve2gr.h(<8 x i16> [[TMP0]], i32 1)
2706*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
2707673c5308Schenli //
vpickve2gr_h(v8i16 _1)2708673c5308Schenli int vpickve2gr_h(v8i16 _1) { return __lsx_vpickve2gr_h(_1, 1); }
2709673c5308Schenli // CHECK-LABEL: @vpickve2gr_w(
2710673c5308Schenli // CHECK-NEXT:  entry:
2711*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2712*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.vpickve2gr.w(<4 x i32> [[TMP0]], i32 1)
2713*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
2714673c5308Schenli //
vpickve2gr_w(v4i32 _1)2715673c5308Schenli int vpickve2gr_w(v4i32 _1) { return __lsx_vpickve2gr_w(_1, 1); }
2716673c5308Schenli // CHECK-LABEL: @vpickve2gr_d(
2717673c5308Schenli // CHECK-NEXT:  entry:
2718*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2719*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64> [[TMP0]], i32 1)
2720*0e01c72cSyjijd // CHECK-NEXT:    ret i64 [[TMP1]]
2721673c5308Schenli //
vpickve2gr_d(v2i64 _1)2722673c5308Schenli long vpickve2gr_d(v2i64 _1) { return __lsx_vpickve2gr_d(_1, 1); }
2723673c5308Schenli // CHECK-LABEL: @vpickve2gr_bu(
2724673c5308Schenli // CHECK-NEXT:  entry:
2725*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2726*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.vpickve2gr.bu(<16 x i8> [[TMP0]], i32 1)
2727*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
2728673c5308Schenli //
vpickve2gr_bu(v16i8 _1)2729673c5308Schenli unsigned int vpickve2gr_bu(v16i8 _1) { return __lsx_vpickve2gr_bu(_1, 1); }
2730673c5308Schenli // CHECK-LABEL: @vpickve2gr_hu(
2731673c5308Schenli // CHECK-NEXT:  entry:
2732*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2733*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.vpickve2gr.hu(<8 x i16> [[TMP0]], i32 1)
2734*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
2735673c5308Schenli //
vpickve2gr_hu(v8i16 _1)2736673c5308Schenli unsigned int vpickve2gr_hu(v8i16 _1) { return __lsx_vpickve2gr_hu(_1, 1); }
2737673c5308Schenli // CHECK-LABEL: @vpickve2gr_wu(
2738673c5308Schenli // CHECK-NEXT:  entry:
2739*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2740*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.vpickve2gr.wu(<4 x i32> [[TMP0]], i32 1)
2741*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
2742673c5308Schenli //
vpickve2gr_wu(v4i32 _1)2743673c5308Schenli unsigned int vpickve2gr_wu(v4i32 _1) { return __lsx_vpickve2gr_wu(_1, 1); }
2744673c5308Schenli // CHECK-LABEL: @vpickve2gr_du(
2745673c5308Schenli // CHECK-NEXT:  entry:
2746*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2747*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64> [[TMP0]], i32 1)
2748*0e01c72cSyjijd // CHECK-NEXT:    ret i64 [[TMP1]]
2749673c5308Schenli //
vpickve2gr_du(v2i64 _1)2750673c5308Schenli unsigned long int vpickve2gr_du(v2i64 _1) { return __lsx_vpickve2gr_du(_1, 1); }
2751673c5308Schenli // CHECK-LABEL: @vinsgr2vr_b(
2752673c5308Schenli // CHECK-NEXT:  entry:
2753*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
2754*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vinsgr2vr.b(<16 x i8> [[TMP0]], i32 1, i32 1)
2755*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
2756*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2757673c5308Schenli //
vinsgr2vr_b(v16i8 _1)2758673c5308Schenli v16i8 vinsgr2vr_b(v16i8 _1) { return __lsx_vinsgr2vr_b(_1, 1, 1); }
2759673c5308Schenli // CHECK-LABEL: @vinsgr2vr_h(
2760673c5308Schenli // CHECK-NEXT:  entry:
2761*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
2762*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vinsgr2vr.h(<8 x i16> [[TMP0]], i32 1, i32 1)
2763*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
2764*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2765673c5308Schenli //
vinsgr2vr_h(v8i16 _1)2766673c5308Schenli v8i16 vinsgr2vr_h(v8i16 _1) { return __lsx_vinsgr2vr_h(_1, 1, 1); }
2767673c5308Schenli // CHECK-LABEL: @vinsgr2vr_w(
2768673c5308Schenli // CHECK-NEXT:  entry:
2769*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
2770*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vinsgr2vr.w(<4 x i32> [[TMP0]], i32 1, i32 1)
2771*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
2772*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2773673c5308Schenli //
vinsgr2vr_w(v4i32 _1)2774673c5308Schenli v4i32 vinsgr2vr_w(v4i32 _1) { return __lsx_vinsgr2vr_w(_1, 1, 1); }
2775673c5308Schenli // CHECK-LABEL: @vinsgr2vr_d(
2776673c5308Schenli // CHECK-NEXT:  entry:
2777*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
2778*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vinsgr2vr.d(<2 x i64> [[TMP0]], i64 1, i32 1)
2779*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
2780*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2781673c5308Schenli //
vinsgr2vr_d(v2i64 _1)2782673c5308Schenli v2i64 vinsgr2vr_d(v2i64 _1) { return __lsx_vinsgr2vr_d(_1, 1, 1); }
2783673c5308Schenli // CHECK-LABEL: @vfadd_s(
2784673c5308Schenli // CHECK-NEXT:  entry:
2785*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2786*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
2787*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfadd.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
2788*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x float> [[TMP2]] to i128
2789*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2790673c5308Schenli //
vfadd_s(v4f32 _1,v4f32 _2)2791673c5308Schenli v4f32 vfadd_s(v4f32 _1, v4f32 _2) { return __lsx_vfadd_s(_1, _2); }
2792673c5308Schenli // CHECK-LABEL: @vfadd_d(
2793673c5308Schenli // CHECK-NEXT:  entry:
2794*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
2795*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
2796*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfadd.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
2797*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x double> [[TMP2]] to i128
2798*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2799673c5308Schenli //
vfadd_d(v2f64 _1,v2f64 _2)2800673c5308Schenli v2f64 vfadd_d(v2f64 _1, v2f64 _2) { return __lsx_vfadd_d(_1, _2); }
2801673c5308Schenli // CHECK-LABEL: @vfsub_s(
2802673c5308Schenli // CHECK-NEXT:  entry:
2803*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2804*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
2805*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfsub.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
2806*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x float> [[TMP2]] to i128
2807*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2808673c5308Schenli //
vfsub_s(v4f32 _1,v4f32 _2)2809673c5308Schenli v4f32 vfsub_s(v4f32 _1, v4f32 _2) { return __lsx_vfsub_s(_1, _2); }
2810673c5308Schenli // CHECK-LABEL: @vfsub_d(
2811673c5308Schenli // CHECK-NEXT:  entry:
2812*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
2813*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
2814*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfsub.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
2815*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x double> [[TMP2]] to i128
2816*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2817673c5308Schenli //
vfsub_d(v2f64 _1,v2f64 _2)2818673c5308Schenli v2f64 vfsub_d(v2f64 _1, v2f64 _2) { return __lsx_vfsub_d(_1, _2); }
2819673c5308Schenli // CHECK-LABEL: @vfmul_s(
2820673c5308Schenli // CHECK-NEXT:  entry:
2821*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2822*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
2823*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmul.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
2824*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x float> [[TMP2]] to i128
2825*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2826673c5308Schenli //
vfmul_s(v4f32 _1,v4f32 _2)2827673c5308Schenli v4f32 vfmul_s(v4f32 _1, v4f32 _2) { return __lsx_vfmul_s(_1, _2); }
2828673c5308Schenli // CHECK-LABEL: @vfmul_d(
2829673c5308Schenli // CHECK-NEXT:  entry:
2830*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
2831*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
2832*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmul.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
2833*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x double> [[TMP2]] to i128
2834*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2835673c5308Schenli //
vfmul_d(v2f64 _1,v2f64 _2)2836673c5308Schenli v2f64 vfmul_d(v2f64 _1, v2f64 _2) { return __lsx_vfmul_d(_1, _2); }
2837673c5308Schenli // CHECK-LABEL: @vfdiv_s(
2838673c5308Schenli // CHECK-NEXT:  entry:
2839*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2840*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
2841*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfdiv.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
2842*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x float> [[TMP2]] to i128
2843*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2844673c5308Schenli //
vfdiv_s(v4f32 _1,v4f32 _2)2845673c5308Schenli v4f32 vfdiv_s(v4f32 _1, v4f32 _2) { return __lsx_vfdiv_s(_1, _2); }
2846673c5308Schenli // CHECK-LABEL: @vfdiv_d(
2847673c5308Schenli // CHECK-NEXT:  entry:
2848*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
2849*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
2850*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfdiv.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
2851*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x double> [[TMP2]] to i128
2852*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2853673c5308Schenli //
vfdiv_d(v2f64 _1,v2f64 _2)2854673c5308Schenli v2f64 vfdiv_d(v2f64 _1, v2f64 _2) { return __lsx_vfdiv_d(_1, _2); }
2855673c5308Schenli // CHECK-LABEL: @vfcvt_h_s(
2856673c5308Schenli // CHECK-NEXT:  entry:
2857*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2858*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
2859*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vfcvt.h.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
2860*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
2861*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2862673c5308Schenli //
vfcvt_h_s(v4f32 _1,v4f32 _2)2863673c5308Schenli v8i16 vfcvt_h_s(v4f32 _1, v4f32 _2) { return __lsx_vfcvt_h_s(_1, _2); }
2864673c5308Schenli // CHECK-LABEL: @vfcvt_s_d(
2865673c5308Schenli // CHECK-NEXT:  entry:
2866*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
2867*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
2868*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfcvt.s.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
2869*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x float> [[TMP2]] to i128
2870*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2871673c5308Schenli //
vfcvt_s_d(v2f64 _1,v2f64 _2)2872673c5308Schenli v4f32 vfcvt_s_d(v2f64 _1, v2f64 _2) { return __lsx_vfcvt_s_d(_1, _2); }
2873673c5308Schenli // CHECK-LABEL: @vfmin_s(
2874673c5308Schenli // CHECK-NEXT:  entry:
2875*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2876*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
2877*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmin.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
2878*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x float> [[TMP2]] to i128
2879*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2880673c5308Schenli //
vfmin_s(v4f32 _1,v4f32 _2)2881673c5308Schenli v4f32 vfmin_s(v4f32 _1, v4f32 _2) { return __lsx_vfmin_s(_1, _2); }
2882673c5308Schenli // CHECK-LABEL: @vfmin_d(
2883673c5308Schenli // CHECK-NEXT:  entry:
2884*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
2885*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
2886*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmin.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
2887*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x double> [[TMP2]] to i128
2888*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2889673c5308Schenli //
vfmin_d(v2f64 _1,v2f64 _2)2890673c5308Schenli v2f64 vfmin_d(v2f64 _1, v2f64 _2) { return __lsx_vfmin_d(_1, _2); }
2891673c5308Schenli // CHECK-LABEL: @vfmina_s(
2892673c5308Schenli // CHECK-NEXT:  entry:
2893*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2894*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
2895*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmina.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
2896*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x float> [[TMP2]] to i128
2897*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2898673c5308Schenli //
vfmina_s(v4f32 _1,v4f32 _2)2899673c5308Schenli v4f32 vfmina_s(v4f32 _1, v4f32 _2) { return __lsx_vfmina_s(_1, _2); }
2900673c5308Schenli // CHECK-LABEL: @vfmina_d(
2901673c5308Schenli // CHECK-NEXT:  entry:
2902*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
2903*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
2904*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmina.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
2905*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x double> [[TMP2]] to i128
2906*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2907673c5308Schenli //
vfmina_d(v2f64 _1,v2f64 _2)2908673c5308Schenli v2f64 vfmina_d(v2f64 _1, v2f64 _2) { return __lsx_vfmina_d(_1, _2); }
2909673c5308Schenli // CHECK-LABEL: @vfmax_s(
2910673c5308Schenli // CHECK-NEXT:  entry:
2911*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2912*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
2913*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmax.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
2914*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x float> [[TMP2]] to i128
2915*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2916673c5308Schenli //
vfmax_s(v4f32 _1,v4f32 _2)2917673c5308Schenli v4f32 vfmax_s(v4f32 _1, v4f32 _2) { return __lsx_vfmax_s(_1, _2); }
2918673c5308Schenli // CHECK-LABEL: @vfmax_d(
2919673c5308Schenli // CHECK-NEXT:  entry:
2920*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
2921*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
2922*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmax.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
2923*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x double> [[TMP2]] to i128
2924*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2925673c5308Schenli //
vfmax_d(v2f64 _1,v2f64 _2)2926673c5308Schenli v2f64 vfmax_d(v2f64 _1, v2f64 _2) { return __lsx_vfmax_d(_1, _2); }
2927673c5308Schenli // CHECK-LABEL: @vfmaxa_s(
2928673c5308Schenli // CHECK-NEXT:  entry:
2929*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2930*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
2931*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmaxa.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
2932*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x float> [[TMP2]] to i128
2933*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2934673c5308Schenli //
vfmaxa_s(v4f32 _1,v4f32 _2)2935673c5308Schenli v4f32 vfmaxa_s(v4f32 _1, v4f32 _2) { return __lsx_vfmaxa_s(_1, _2); }
2936673c5308Schenli // CHECK-LABEL: @vfmaxa_d(
2937673c5308Schenli // CHECK-NEXT:  entry:
2938*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
2939*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
2940*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmaxa.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
2941*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x double> [[TMP2]] to i128
2942*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
2943673c5308Schenli //
vfmaxa_d(v2f64 _1,v2f64 _2)2944673c5308Schenli v2f64 vfmaxa_d(v2f64 _1, v2f64 _2) { return __lsx_vfmaxa_d(_1, _2); }
2945673c5308Schenli // CHECK-LABEL: @vfclass_s(
2946673c5308Schenli // CHECK-NEXT:  entry:
2947*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2948*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfclass.s(<4 x float> [[TMP0]])
2949*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
2950*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2951673c5308Schenli //
vfclass_s(v4f32 _1)2952673c5308Schenli v4i32 vfclass_s(v4f32 _1) { return __lsx_vfclass_s(_1); }
2953673c5308Schenli // CHECK-LABEL: @vfclass_d(
2954673c5308Schenli // CHECK-NEXT:  entry:
2955*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
2956*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfclass.d(<2 x double> [[TMP0]])
2957*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
2958*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2959673c5308Schenli //
// Exercises __lsx_vfclass_d (double in, i64-vector out); pinned by @llvm.loongarch.lsx.vfclass.d CHECKs above.
vfclass_d(v2f64 _1)2960673c5308Schenli v2i64 vfclass_d(v2f64 _1) { return __lsx_vfclass_d(_1); }
2961673c5308Schenli // CHECK-LABEL: @vfsqrt_s(
2962673c5308Schenli // CHECK-NEXT:  entry:
2963*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2964*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfsqrt.s(<4 x float> [[TMP0]])
2965*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
2966*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2967673c5308Schenli //
// Exercises __lsx_vfsqrt_s; pinned by @llvm.loongarch.lsx.vfsqrt.s CHECKs above.
vfsqrt_s(v4f32 _1)2968673c5308Schenli v4f32 vfsqrt_s(v4f32 _1) { return __lsx_vfsqrt_s(_1); }
2969673c5308Schenli // CHECK-LABEL: @vfsqrt_d(
2970673c5308Schenli // CHECK-NEXT:  entry:
2971*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
2972*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfsqrt.d(<2 x double> [[TMP0]])
2973*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
2974*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2975673c5308Schenli //
// Exercises __lsx_vfsqrt_d; pinned by @llvm.loongarch.lsx.vfsqrt.d CHECKs above.
vfsqrt_d(v2f64 _1)2976673c5308Schenli v2f64 vfsqrt_d(v2f64 _1) { return __lsx_vfsqrt_d(_1); }
2977673c5308Schenli // CHECK-LABEL: @vfrecip_s(
2978673c5308Schenli // CHECK-NEXT:  entry:
2979*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2980*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrecip.s(<4 x float> [[TMP0]])
2981*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
2982*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2983673c5308Schenli //
// Exercises __lsx_vfrecip_s; pinned by @llvm.loongarch.lsx.vfrecip.s CHECKs above.
vfrecip_s(v4f32 _1)2984673c5308Schenli v4f32 vfrecip_s(v4f32 _1) { return __lsx_vfrecip_s(_1); }
2985673c5308Schenli // CHECK-LABEL: @vfrecip_d(
2986673c5308Schenli // CHECK-NEXT:  entry:
2987*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
2988*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrecip.d(<2 x double> [[TMP0]])
2989*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
2990*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2991673c5308Schenli //
// Exercises __lsx_vfrecip_d; pinned by @llvm.loongarch.lsx.vfrecip.d CHECKs above.
vfrecip_d(v2f64 _1)2992673c5308Schenli v2f64 vfrecip_d(v2f64 _1) { return __lsx_vfrecip_d(_1); }
2993673c5308Schenli // CHECK-LABEL: @vfrint_s(
2994673c5308Schenli // CHECK-NEXT:  entry:
2995*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
2996*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrint.s(<4 x float> [[TMP0]])
2997*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
2998*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
2999673c5308Schenli //
// Exercises __lsx_vfrint_s; pinned by @llvm.loongarch.lsx.vfrint.s CHECKs above.
vfrint_s(v4f32 _1)3000673c5308Schenli v4f32 vfrint_s(v4f32 _1) { return __lsx_vfrint_s(_1); }
3001673c5308Schenli // CHECK-LABEL: @vfrint_d(
3002673c5308Schenli // CHECK-NEXT:  entry:
3003*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3004*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrint.d(<2 x double> [[TMP0]])
3005*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
3006*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3007673c5308Schenli //
// Exercises __lsx_vfrint_d; pinned by @llvm.loongarch.lsx.vfrint.d CHECKs above.
vfrint_d(v2f64 _1)3008673c5308Schenli v2f64 vfrint_d(v2f64 _1) { return __lsx_vfrint_d(_1); }
3009673c5308Schenli // CHECK-LABEL: @vfrsqrt_s(
3010673c5308Schenli // CHECK-NEXT:  entry:
3011*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3012*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrsqrt.s(<4 x float> [[TMP0]])
3013*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
3014*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3015673c5308Schenli //
// Exercises __lsx_vfrsqrt_s; pinned by @llvm.loongarch.lsx.vfrsqrt.s CHECKs above.
vfrsqrt_s(v4f32 _1)3016673c5308Schenli v4f32 vfrsqrt_s(v4f32 _1) { return __lsx_vfrsqrt_s(_1); }
3017673c5308Schenli // CHECK-LABEL: @vfrsqrt_d(
3018673c5308Schenli // CHECK-NEXT:  entry:
3019*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3020*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrsqrt.d(<2 x double> [[TMP0]])
3021*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
3022*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3023673c5308Schenli //
// Exercises __lsx_vfrsqrt_d; pinned by @llvm.loongarch.lsx.vfrsqrt.d CHECKs above.
vfrsqrt_d(v2f64 _1)3024673c5308Schenli v2f64 vfrsqrt_d(v2f64 _1) { return __lsx_vfrsqrt_d(_1); }
3025673c5308Schenli // CHECK-LABEL: @vflogb_s(
3026673c5308Schenli // CHECK-NEXT:  entry:
3027*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3028*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vflogb.s(<4 x float> [[TMP0]])
3029*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
3030*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3031673c5308Schenli //
// Exercises __lsx_vflogb_s; pinned by @llvm.loongarch.lsx.vflogb.s CHECKs above.
vflogb_s(v4f32 _1)3032673c5308Schenli v4f32 vflogb_s(v4f32 _1) { return __lsx_vflogb_s(_1); }
3033673c5308Schenli // CHECK-LABEL: @vflogb_d(
3034673c5308Schenli // CHECK-NEXT:  entry:
3035*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3036*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vflogb.d(<2 x double> [[TMP0]])
3037*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
3038*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3039673c5308Schenli //
// Exercises __lsx_vflogb_d; pinned by @llvm.loongarch.lsx.vflogb.d CHECKs above.
vflogb_d(v2f64 _1)3040673c5308Schenli v2f64 vflogb_d(v2f64 _1) { return __lsx_vflogb_d(_1); }
3041673c5308Schenli // CHECK-LABEL: @vfcvth_s_h(
3042673c5308Schenli // CHECK-NEXT:  entry:
3043*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3044*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfcvth.s.h(<8 x i16> [[TMP0]])
3045*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
3046*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3047673c5308Schenli //
// Exercises __lsx_vfcvth_s_h (i16 vector in, float vector out); pinned by @llvm.loongarch.lsx.vfcvth.s.h CHECKs above.
vfcvth_s_h(v8i16 _1)3048673c5308Schenli v4f32 vfcvth_s_h(v8i16 _1) { return __lsx_vfcvth_s_h(_1); }
3049673c5308Schenli // CHECK-LABEL: @vfcvth_d_s(
3050673c5308Schenli // CHECK-NEXT:  entry:
3051*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3052*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfcvth.d.s(<4 x float> [[TMP0]])
3053*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
3054*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3055673c5308Schenli //
// Exercises __lsx_vfcvth_d_s (float vector in, double vector out); pinned by @llvm.loongarch.lsx.vfcvth.d.s CHECKs above.
vfcvth_d_s(v4f32 _1)3056673c5308Schenli v2f64 vfcvth_d_s(v4f32 _1) { return __lsx_vfcvth_d_s(_1); }
3057673c5308Schenli // CHECK-LABEL: @vfcvtl_s_h(
3058673c5308Schenli // CHECK-NEXT:  entry:
3059*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3060*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfcvtl.s.h(<8 x i16> [[TMP0]])
3061*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
3062*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3063673c5308Schenli //
// Exercises __lsx_vfcvtl_s_h; pinned by @llvm.loongarch.lsx.vfcvtl.s.h CHECKs above.
vfcvtl_s_h(v8i16 _1)3064673c5308Schenli v4f32 vfcvtl_s_h(v8i16 _1) { return __lsx_vfcvtl_s_h(_1); }
3065673c5308Schenli // CHECK-LABEL: @vfcvtl_d_s(
3066673c5308Schenli // CHECK-NEXT:  entry:
3067*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3068*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfcvtl.d.s(<4 x float> [[TMP0]])
3069*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
3070*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3071673c5308Schenli //
// Exercises __lsx_vfcvtl_d_s; pinned by @llvm.loongarch.lsx.vfcvtl.d.s CHECKs above.
vfcvtl_d_s(v4f32 _1)3072673c5308Schenli v2f64 vfcvtl_d_s(v4f32 _1) { return __lsx_vfcvtl_d_s(_1); }
3073673c5308Schenli // CHECK-LABEL: @vftint_w_s(
3074673c5308Schenli // CHECK-NEXT:  entry:
3075*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3076*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftint.w.s(<4 x float> [[TMP0]])
3077*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
3078*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3079673c5308Schenli //
// Exercises __lsx_vftint_w_s (float->signed i32 vector); pinned by @llvm.loongarch.lsx.vftint.w.s CHECKs above.
vftint_w_s(v4f32 _1)3080673c5308Schenli v4i32 vftint_w_s(v4f32 _1) { return __lsx_vftint_w_s(_1); }
3081673c5308Schenli // CHECK-LABEL: @vftint_l_d(
3082673c5308Schenli // CHECK-NEXT:  entry:
3083*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3084*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftint.l.d(<2 x double> [[TMP0]])
3085*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3086*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3087673c5308Schenli //
// Exercises __lsx_vftint_l_d; pinned by @llvm.loongarch.lsx.vftint.l.d CHECKs above.
vftint_l_d(v2f64 _1)3088673c5308Schenli v2i64 vftint_l_d(v2f64 _1) { return __lsx_vftint_l_d(_1); }
3089673c5308Schenli // CHECK-LABEL: @vftint_wu_s(
3090673c5308Schenli // CHECK-NEXT:  entry:
3091*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3092*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftint.wu.s(<4 x float> [[TMP0]])
3093*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
3094*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3095673c5308Schenli //
// Exercises __lsx_vftint_wu_s (unsigned result type v4u32); pinned by @llvm.loongarch.lsx.vftint.wu.s CHECKs above.
vftint_wu_s(v4f32 _1)3096673c5308Schenli v4u32 vftint_wu_s(v4f32 _1) { return __lsx_vftint_wu_s(_1); }
3097673c5308Schenli // CHECK-LABEL: @vftint_lu_d(
3098673c5308Schenli // CHECK-NEXT:  entry:
3099*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3100*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftint.lu.d(<2 x double> [[TMP0]])
3101*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3102*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3103673c5308Schenli //
// Exercises __lsx_vftint_lu_d (unsigned result type v2u64); pinned by @llvm.loongarch.lsx.vftint.lu.d CHECKs above.
vftint_lu_d(v2f64 _1)3104673c5308Schenli v2u64 vftint_lu_d(v2f64 _1) { return __lsx_vftint_lu_d(_1); }
3105673c5308Schenli // CHECK-LABEL: @vftintrz_w_s(
3106673c5308Schenli // CHECK-NEXT:  entry:
3107*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3108*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrz.w.s(<4 x float> [[TMP0]])
3109*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
3110*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3111673c5308Schenli //
// Exercises __lsx_vftintrz_w_s; pinned by @llvm.loongarch.lsx.vftintrz.w.s CHECKs above.
vftintrz_w_s(v4f32 _1)3112673c5308Schenli v4i32 vftintrz_w_s(v4f32 _1) { return __lsx_vftintrz_w_s(_1); }
3113673c5308Schenli // CHECK-LABEL: @vftintrz_l_d(
3114673c5308Schenli // CHECK-NEXT:  entry:
3115*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3116*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrz.l.d(<2 x double> [[TMP0]])
3117*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3118*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3119673c5308Schenli //
// Exercises __lsx_vftintrz_l_d; pinned by @llvm.loongarch.lsx.vftintrz.l.d CHECKs above.
vftintrz_l_d(v2f64 _1)3120673c5308Schenli v2i64 vftintrz_l_d(v2f64 _1) { return __lsx_vftintrz_l_d(_1); }
3121673c5308Schenli // CHECK-LABEL: @vftintrz_wu_s(
3122673c5308Schenli // CHECK-NEXT:  entry:
3123*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3124*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrz.wu.s(<4 x float> [[TMP0]])
3125*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
3126*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3127673c5308Schenli //
// Exercises __lsx_vftintrz_wu_s; pinned by @llvm.loongarch.lsx.vftintrz.wu.s CHECKs above.
vftintrz_wu_s(v4f32 _1)3128673c5308Schenli v4u32 vftintrz_wu_s(v4f32 _1) { return __lsx_vftintrz_wu_s(_1); }
3129673c5308Schenli // CHECK-LABEL: @vftintrz_lu_d(
3130673c5308Schenli // CHECK-NEXT:  entry:
3131*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3132*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrz.lu.d(<2 x double> [[TMP0]])
3133*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3134*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3135673c5308Schenli //
// Exercises __lsx_vftintrz_lu_d; pinned by @llvm.loongarch.lsx.vftintrz.lu.d CHECKs above.
vftintrz_lu_d(v2f64 _1)3136673c5308Schenli v2u64 vftintrz_lu_d(v2f64 _1) { return __lsx_vftintrz_lu_d(_1); }
3137673c5308Schenli // CHECK-LABEL: @vffint_s_w(
3138673c5308Schenli // CHECK-NEXT:  entry:
3139*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3140*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vffint.s.w(<4 x i32> [[TMP0]])
3141*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
3142*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3143673c5308Schenli //
// Exercises __lsx_vffint_s_w (signed i32 vector -> float vector); pinned by @llvm.loongarch.lsx.vffint.s.w CHECKs above.
vffint_s_w(v4i32 _1)3144673c5308Schenli v4f32 vffint_s_w(v4i32 _1) { return __lsx_vffint_s_w(_1); }
3145673c5308Schenli // CHECK-LABEL: @vffint_d_l(
3146673c5308Schenli // CHECK-NEXT:  entry:
3147*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3148*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vffint.d.l(<2 x i64> [[TMP0]])
3149*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
3150*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3151673c5308Schenli //
// Exercises __lsx_vffint_d_l; pinned by @llvm.loongarch.lsx.vffint.d.l CHECKs above.
vffint_d_l(v2i64 _1)3152673c5308Schenli v2f64 vffint_d_l(v2i64 _1) { return __lsx_vffint_d_l(_1); }
3153673c5308Schenli // CHECK-LABEL: @vffint_s_wu(
3154673c5308Schenli // CHECK-NEXT:  entry:
3155*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3156*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vffint.s.wu(<4 x i32> [[TMP0]])
3157*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
3158*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3159673c5308Schenli //
// Exercises __lsx_vffint_s_wu (unsigned operand type v4u32); pinned by @llvm.loongarch.lsx.vffint.s.wu CHECKs above.
vffint_s_wu(v4u32 _1)3160673c5308Schenli v4f32 vffint_s_wu(v4u32 _1) { return __lsx_vffint_s_wu(_1); }
3161673c5308Schenli // CHECK-LABEL: @vffint_d_lu(
3162673c5308Schenli // CHECK-NEXT:  entry:
3163*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3164*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vffint.d.lu(<2 x i64> [[TMP0]])
3165*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
3166*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3167673c5308Schenli //
// Exercises __lsx_vffint_d_lu; pinned by @llvm.loongarch.lsx.vffint.d.lu CHECKs above.
vffint_d_lu(v2u64 _1)3168673c5308Schenli v2f64 vffint_d_lu(v2u64 _1) { return __lsx_vffint_d_lu(_1); }
3169673c5308Schenli // CHECK-LABEL: @vandn_v(
3170673c5308Schenli // CHECK-NEXT:  entry:
3171*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3172*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
3173*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vandn.v(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
3174*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3175*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3176673c5308Schenli //
// Exercises __lsx_vandn_v; pinned by @llvm.loongarch.lsx.vandn.v CHECKs above.
vandn_v(v16u8 _1,v16u8 _2)3177673c5308Schenli v16u8 vandn_v(v16u8 _1, v16u8 _2) { return __lsx_vandn_v(_1, _2); }
3178673c5308Schenli // CHECK-LABEL: @vneg_b(
3179673c5308Schenli // CHECK-NEXT:  entry:
3180*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3181*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vneg.b(<16 x i8> [[TMP0]])
3182*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
3183*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3184673c5308Schenli //
// Exercises __lsx_vneg_b; pinned by @llvm.loongarch.lsx.vneg.b CHECKs above.
vneg_b(v16i8 _1)3185673c5308Schenli v16i8 vneg_b(v16i8 _1) { return __lsx_vneg_b(_1); }
3186673c5308Schenli // CHECK-LABEL: @vneg_h(
3187673c5308Schenli // CHECK-NEXT:  entry:
3188*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3189*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vneg.h(<8 x i16> [[TMP0]])
3190*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
3191*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3192673c5308Schenli //
// Exercises __lsx_vneg_h; pinned by @llvm.loongarch.lsx.vneg.h CHECKs above.
vneg_h(v8i16 _1)3193673c5308Schenli v8i16 vneg_h(v8i16 _1) { return __lsx_vneg_h(_1); }
3194673c5308Schenli // CHECK-LABEL: @vneg_w(
3195673c5308Schenli // CHECK-NEXT:  entry:
3196*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3197*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vneg.w(<4 x i32> [[TMP0]])
3198*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
3199*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3200673c5308Schenli //
// Exercises __lsx_vneg_w; pinned by @llvm.loongarch.lsx.vneg.w CHECKs above.
vneg_w(v4i32 _1)3201673c5308Schenli v4i32 vneg_w(v4i32 _1) { return __lsx_vneg_w(_1); }
3202673c5308Schenli // CHECK-LABEL: @vneg_d(
3203673c5308Schenli // CHECK-NEXT:  entry:
3204*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3205*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vneg.d(<2 x i64> [[TMP0]])
3206*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3207*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3208673c5308Schenli //
// Exercises __lsx_vneg_d; pinned by @llvm.loongarch.lsx.vneg.d CHECKs above.
vneg_d(v2i64 _1)3209673c5308Schenli v2i64 vneg_d(v2i64 _1) { return __lsx_vneg_d(_1); }
3210673c5308Schenli // CHECK-LABEL: @vmuh_b(
3211673c5308Schenli // CHECK-NEXT:  entry:
3212*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3213*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
3214*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmuh.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
3215*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3216*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3217673c5308Schenli //
// Exercises __lsx_vmuh_b; pinned by @llvm.loongarch.lsx.vmuh.b CHECKs above.
vmuh_b(v16i8 _1,v16i8 _2)3218673c5308Schenli v16i8 vmuh_b(v16i8 _1, v16i8 _2) { return __lsx_vmuh_b(_1, _2); }
3219673c5308Schenli // CHECK-LABEL: @vmuh_h(
3220673c5308Schenli // CHECK-NEXT:  entry:
3221*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3222*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3223*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmuh.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3224*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3225*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3226673c5308Schenli //
// Exercises __lsx_vmuh_h; pinned by @llvm.loongarch.lsx.vmuh.h CHECKs above.
vmuh_h(v8i16 _1,v8i16 _2)3227673c5308Schenli v8i16 vmuh_h(v8i16 _1, v8i16 _2) { return __lsx_vmuh_h(_1, _2); }
3228673c5308Schenli // CHECK-LABEL: @vmuh_w(
3229673c5308Schenli // CHECK-NEXT:  entry:
3230*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3231*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3232*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmuh.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3233*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3234*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3235673c5308Schenli //
// Exercises __lsx_vmuh_w; pinned by @llvm.loongarch.lsx.vmuh.w CHECKs above.
vmuh_w(v4i32 _1,v4i32 _2)3236673c5308Schenli v4i32 vmuh_w(v4i32 _1, v4i32 _2) { return __lsx_vmuh_w(_1, _2); }
3237673c5308Schenli // CHECK-LABEL: @vmuh_d(
3238673c5308Schenli // CHECK-NEXT:  entry:
3239*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3240*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3241*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmuh.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3242*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
3243*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3244673c5308Schenli //
// Exercises __lsx_vmuh_d; pinned by @llvm.loongarch.lsx.vmuh.d CHECKs above.
vmuh_d(v2i64 _1,v2i64 _2)3245673c5308Schenli v2i64 vmuh_d(v2i64 _1, v2i64 _2) { return __lsx_vmuh_d(_1, _2); }
3246673c5308Schenli // CHECK-LABEL: @vmuh_bu(
3247673c5308Schenli // CHECK-NEXT:  entry:
3248*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3249*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
3250*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmuh.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
3251*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3252*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3253673c5308Schenli //
// Exercises __lsx_vmuh_bu (unsigned variant); pinned by @llvm.loongarch.lsx.vmuh.bu CHECKs above.
vmuh_bu(v16u8 _1,v16u8 _2)3254673c5308Schenli v16u8 vmuh_bu(v16u8 _1, v16u8 _2) { return __lsx_vmuh_bu(_1, _2); }
3255673c5308Schenli // CHECK-LABEL: @vmuh_hu(
3256673c5308Schenli // CHECK-NEXT:  entry:
3257*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3258*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3259*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmuh.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3260*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3261*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3262673c5308Schenli //
// Exercises __lsx_vmuh_hu; pinned by @llvm.loongarch.lsx.vmuh.hu CHECKs above.
vmuh_hu(v8u16 _1,v8u16 _2)3263673c5308Schenli v8u16 vmuh_hu(v8u16 _1, v8u16 _2) { return __lsx_vmuh_hu(_1, _2); }
3264673c5308Schenli // CHECK-LABEL: @vmuh_wu(
3265673c5308Schenli // CHECK-NEXT:  entry:
3266*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3267*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3268*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmuh.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3269*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3270*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3271673c5308Schenli //
// Exercises __lsx_vmuh_wu; pinned by @llvm.loongarch.lsx.vmuh.wu CHECKs above.
vmuh_wu(v4u32 _1,v4u32 _2)3272673c5308Schenli v4u32 vmuh_wu(v4u32 _1, v4u32 _2) { return __lsx_vmuh_wu(_1, _2); }
3273673c5308Schenli // CHECK-LABEL: @vmuh_du(
3274673c5308Schenli // CHECK-NEXT:  entry:
3275*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3276*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3277*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmuh.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3278*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
3279*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3280673c5308Schenli //
// Exercises __lsx_vmuh_du; pinned by @llvm.loongarch.lsx.vmuh.du CHECKs above.
vmuh_du(v2u64 _1,v2u64 _2)3281673c5308Schenli v2u64 vmuh_du(v2u64 _1, v2u64 _2) { return __lsx_vmuh_du(_1, _2); }
3282673c5308Schenli // CHECK-LABEL: @vsllwil_h_b(
3283673c5308Schenli // CHECK-NEXT:  entry:
3284*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3285*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsllwil.h.b(<16 x i8> [[TMP0]], i32 1)
3286*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
3287*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3288673c5308Schenli //
// Exercises __lsx_vsllwil_h_b with immediate 1 (CHECK shows it lowered as i32 1); pinned by @llvm.loongarch.lsx.vsllwil.h.b CHECKs above.
vsllwil_h_b(v16i8 _1)3289673c5308Schenli v8i16 vsllwil_h_b(v16i8 _1) { return __lsx_vsllwil_h_b(_1, 1); }
3290673c5308Schenli // CHECK-LABEL: @vsllwil_w_h(
3291673c5308Schenli // CHECK-NEXT:  entry:
3292*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3293*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsllwil.w.h(<8 x i16> [[TMP0]], i32 1)
3294*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
3295*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3296673c5308Schenli //
// Exercises __lsx_vsllwil_w_h with immediate 1; pinned by @llvm.loongarch.lsx.vsllwil.w.h CHECKs above.
vsllwil_w_h(v8i16 _1)3297673c5308Schenli v4i32 vsllwil_w_h(v8i16 _1) { return __lsx_vsllwil_w_h(_1, 1); }
3298673c5308Schenli // CHECK-LABEL: @vsllwil_d_w(
3299673c5308Schenli // CHECK-NEXT:  entry:
3300*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3301*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsllwil.d.w(<4 x i32> [[TMP0]], i32 1)
3302*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3303*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3304673c5308Schenli //
// Exercises __lsx_vsllwil_d_w with immediate 1; pinned by @llvm.loongarch.lsx.vsllwil.d.w CHECKs above.
vsllwil_d_w(v4i32 _1)3305673c5308Schenli v2i64 vsllwil_d_w(v4i32 _1) { return __lsx_vsllwil_d_w(_1, 1); }
3306673c5308Schenli // CHECK-LABEL: @vsllwil_hu_bu(
3307673c5308Schenli // CHECK-NEXT:  entry:
3308*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3309*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsllwil.hu.bu(<16 x i8> [[TMP0]], i32 1)
3310*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
3311*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3312673c5308Schenli //
// Exercises __lsx_vsllwil_hu_bu with immediate 1; pinned by @llvm.loongarch.lsx.vsllwil.hu.bu CHECKs above.
vsllwil_hu_bu(v16u8 _1)3313673c5308Schenli v8u16 vsllwil_hu_bu(v16u8 _1) { return __lsx_vsllwil_hu_bu(_1, 1); }
3314673c5308Schenli // CHECK-LABEL: @vsllwil_wu_hu(
3315673c5308Schenli // CHECK-NEXT:  entry:
3316*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3317*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsllwil.wu.hu(<8 x i16> [[TMP0]], i32 1)
3318*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
3319*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3320673c5308Schenli //
// Exercises __lsx_vsllwil_wu_hu with immediate 1; pinned by @llvm.loongarch.lsx.vsllwil.wu.hu CHECKs above.
vsllwil_wu_hu(v8u16 _1)3321673c5308Schenli v4u32 vsllwil_wu_hu(v8u16 _1) { return __lsx_vsllwil_wu_hu(_1, 1); }
3322673c5308Schenli // CHECK-LABEL: @vsllwil_du_wu(
3323673c5308Schenli // CHECK-NEXT:  entry:
3324*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3325*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsllwil.du.wu(<4 x i32> [[TMP0]], i32 1)
3326*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3327*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3328673c5308Schenli //
// Exercises __lsx_vsllwil_du_wu with immediate 1; pinned by @llvm.loongarch.lsx.vsllwil.du.wu CHECKs above.
vsllwil_du_wu(v4u32 _1)3329673c5308Schenli v2u64 vsllwil_du_wu(v4u32 _1) { return __lsx_vsllwil_du_wu(_1, 1); }
3330673c5308Schenli // CHECK-LABEL: @vsran_b_h(
3331673c5308Schenli // CHECK-NEXT:  entry:
3332*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3333*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3334*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsran.b.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3335*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3336*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3337673c5308Schenli //
// Exercises __lsx_vsran_b_h (narrowing: i16 operands, i8 result); pinned by @llvm.loongarch.lsx.vsran.b.h CHECKs above.
vsran_b_h(v8i16 _1,v8i16 _2)3338673c5308Schenli v16i8 vsran_b_h(v8i16 _1, v8i16 _2) { return __lsx_vsran_b_h(_1, _2); }
3339673c5308Schenli // CHECK-LABEL: @vsran_h_w(
3340673c5308Schenli // CHECK-NEXT:  entry:
3341*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3342*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3343*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsran.h.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3344*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3345*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3346673c5308Schenli //
// Exercises __lsx_vsran_h_w; pinned by @llvm.loongarch.lsx.vsran.h.w CHECKs above.
vsran_h_w(v4i32 _1,v4i32 _2)3347673c5308Schenli v8i16 vsran_h_w(v4i32 _1, v4i32 _2) { return __lsx_vsran_h_w(_1, _2); }
3348673c5308Schenli // CHECK-LABEL: @vsran_w_d(
3349673c5308Schenli // CHECK-NEXT:  entry:
3350*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3351*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3352*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsran.w.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3353*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3354*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3355673c5308Schenli //
// Exercises __lsx_vsran_w_d; pinned by @llvm.loongarch.lsx.vsran.w.d CHECKs above.
vsran_w_d(v2i64 _1,v2i64 _2)3356673c5308Schenli v4i32 vsran_w_d(v2i64 _1, v2i64 _2) { return __lsx_vsran_w_d(_1, _2); }
3357673c5308Schenli // CHECK-LABEL: @vssran_b_h(
3358673c5308Schenli // CHECK-NEXT:  entry:
3359*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3360*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3361*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssran.b.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3362*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3363*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3364673c5308Schenli //
// Exercises __lsx_vssran_b_h; pinned by @llvm.loongarch.lsx.vssran.b.h CHECKs above.
vssran_b_h(v8i16 _1,v8i16 _2)3365673c5308Schenli v16i8 vssran_b_h(v8i16 _1, v8i16 _2) { return __lsx_vssran_b_h(_1, _2); }
3366673c5308Schenli // CHECK-LABEL: @vssran_h_w(
3367673c5308Schenli // CHECK-NEXT:  entry:
3368*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3369*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3370*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssran.h.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3371*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3372*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3373673c5308Schenli //
// Exercises __lsx_vssran_h_w; pinned by @llvm.loongarch.lsx.vssran.h.w CHECKs above.
vssran_h_w(v4i32 _1,v4i32 _2)3374673c5308Schenli v8i16 vssran_h_w(v4i32 _1, v4i32 _2) { return __lsx_vssran_h_w(_1, _2); }
3375673c5308Schenli // CHECK-LABEL: @vssran_w_d(
3376673c5308Schenli // CHECK-NEXT:  entry:
3377*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3378*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3379*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssran.w.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3380*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3381*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3382673c5308Schenli //
// Exercises __lsx_vssran_w_d; pinned by @llvm.loongarch.lsx.vssran.w.d CHECKs above.
vssran_w_d(v2i64 _1,v2i64 _2)3383673c5308Schenli v4i32 vssran_w_d(v2i64 _1, v2i64 _2) { return __lsx_vssran_w_d(_1, _2); }
3384673c5308Schenli // CHECK-LABEL: @vssran_bu_h(
3385673c5308Schenli // CHECK-NEXT:  entry:
3386*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3387*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3388*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssran.bu.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3389*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3390*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3391673c5308Schenli //
vssran_bu_h(v8u16 _1,v8u16 _2)3392673c5308Schenli v16u8 vssran_bu_h(v8u16 _1, v8u16 _2) { return __lsx_vssran_bu_h(_1, _2); }
3393673c5308Schenli // CHECK-LABEL: @vssran_hu_w(
3394673c5308Schenli // CHECK-NEXT:  entry:
3395*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3396*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3397*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssran.hu.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3398*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3399*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3400673c5308Schenli //
vssran_hu_w(v4u32 _1,v4u32 _2)3401673c5308Schenli v8u16 vssran_hu_w(v4u32 _1, v4u32 _2) { return __lsx_vssran_hu_w(_1, _2); }
3402673c5308Schenli // CHECK-LABEL: @vssran_wu_d(
3403673c5308Schenli // CHECK-NEXT:  entry:
3404*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3405*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3406*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssran.wu.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3407*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3408*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3409673c5308Schenli //
vssran_wu_d(v2u64 _1,v2u64 _2)3410673c5308Schenli v4u32 vssran_wu_d(v2u64 _1, v2u64 _2) { return __lsx_vssran_wu_d(_1, _2); }
3411673c5308Schenli // CHECK-LABEL: @vsrarn_b_h(
3412673c5308Schenli // CHECK-NEXT:  entry:
3413*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3414*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3415*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrarn.b.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3416*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3417*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3418673c5308Schenli //
vsrarn_b_h(v8i16 _1,v8i16 _2)3419673c5308Schenli v16i8 vsrarn_b_h(v8i16 _1, v8i16 _2) { return __lsx_vsrarn_b_h(_1, _2); }
3420673c5308Schenli // CHECK-LABEL: @vsrarn_h_w(
3421673c5308Schenli // CHECK-NEXT:  entry:
3422*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3423*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3424*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrarn.h.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3425*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3426*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3427673c5308Schenli //
vsrarn_h_w(v4i32 _1,v4i32 _2)3428673c5308Schenli v8i16 vsrarn_h_w(v4i32 _1, v4i32 _2) { return __lsx_vsrarn_h_w(_1, _2); }
3429673c5308Schenli // CHECK-LABEL: @vsrarn_w_d(
3430673c5308Schenli // CHECK-NEXT:  entry:
3431*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3432*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3433*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrarn.w.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3434*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3435*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3436673c5308Schenli //
vsrarn_w_d(v2i64 _1,v2i64 _2)3437673c5308Schenli v4i32 vsrarn_w_d(v2i64 _1, v2i64 _2) { return __lsx_vsrarn_w_d(_1, _2); }
3438673c5308Schenli // CHECK-LABEL: @vssrarn_b_h(
3439673c5308Schenli // CHECK-NEXT:  entry:
3440*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3441*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3442*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrarn.b.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3443*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3444*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3445673c5308Schenli //
vssrarn_b_h(v8i16 _1,v8i16 _2)3446673c5308Schenli v16i8 vssrarn_b_h(v8i16 _1, v8i16 _2) { return __lsx_vssrarn_b_h(_1, _2); }
3447673c5308Schenli // CHECK-LABEL: @vssrarn_h_w(
3448673c5308Schenli // CHECK-NEXT:  entry:
3449*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3450*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3451*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrarn.h.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3452*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3453*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3454673c5308Schenli //
vssrarn_h_w(v4i32 _1,v4i32 _2)3455673c5308Schenli v8i16 vssrarn_h_w(v4i32 _1, v4i32 _2) { return __lsx_vssrarn_h_w(_1, _2); }
3456673c5308Schenli // CHECK-LABEL: @vssrarn_w_d(
3457673c5308Schenli // CHECK-NEXT:  entry:
3458*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3459*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3460*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrarn.w.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3461*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3462*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3463673c5308Schenli //
vssrarn_w_d(v2i64 _1,v2i64 _2)3464673c5308Schenli v4i32 vssrarn_w_d(v2i64 _1, v2i64 _2) { return __lsx_vssrarn_w_d(_1, _2); }
3465673c5308Schenli // CHECK-LABEL: @vssrarn_bu_h(
3466673c5308Schenli // CHECK-NEXT:  entry:
3467*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3468*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3469*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrarn.bu.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3470*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3471*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3472673c5308Schenli //
vssrarn_bu_h(v8u16 _1,v8u16 _2)3473673c5308Schenli v16u8 vssrarn_bu_h(v8u16 _1, v8u16 _2) { return __lsx_vssrarn_bu_h(_1, _2); }
3474673c5308Schenli // CHECK-LABEL: @vssrarn_hu_w(
3475673c5308Schenli // CHECK-NEXT:  entry:
3476*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3477*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3478*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrarn.hu.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3479*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3480*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3481673c5308Schenli //
vssrarn_hu_w(v4u32 _1,v4u32 _2)3482673c5308Schenli v8u16 vssrarn_hu_w(v4u32 _1, v4u32 _2) { return __lsx_vssrarn_hu_w(_1, _2); }
3483673c5308Schenli // CHECK-LABEL: @vssrarn_wu_d(
3484673c5308Schenli // CHECK-NEXT:  entry:
3485*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3486*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3487*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrarn.wu.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3488*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3489*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3490673c5308Schenli //
vssrarn_wu_d(v2u64 _1,v2u64 _2)3491673c5308Schenli v4u32 vssrarn_wu_d(v2u64 _1, v2u64 _2) { return __lsx_vssrarn_wu_d(_1, _2); }
3492673c5308Schenli // CHECK-LABEL: @vsrln_b_h(
3493673c5308Schenli // CHECK-NEXT:  entry:
3494*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3495*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3496*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrln.b.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3497*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3498*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3499673c5308Schenli //
vsrln_b_h(v8i16 _1,v8i16 _2)3500673c5308Schenli v16i8 vsrln_b_h(v8i16 _1, v8i16 _2) { return __lsx_vsrln_b_h(_1, _2); }
3501673c5308Schenli // CHECK-LABEL: @vsrln_h_w(
3502673c5308Schenli // CHECK-NEXT:  entry:
3503*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3504*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3505*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrln.h.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3506*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3507*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3508673c5308Schenli //
vsrln_h_w(v4i32 _1,v4i32 _2)3509673c5308Schenli v8i16 vsrln_h_w(v4i32 _1, v4i32 _2) { return __lsx_vsrln_h_w(_1, _2); }
3510673c5308Schenli // CHECK-LABEL: @vsrln_w_d(
3511673c5308Schenli // CHECK-NEXT:  entry:
3512*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3513*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3514*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrln.w.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3515*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3516*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3517673c5308Schenli //
vsrln_w_d(v2i64 _1,v2i64 _2)3518673c5308Schenli v4i32 vsrln_w_d(v2i64 _1, v2i64 _2) { return __lsx_vsrln_w_d(_1, _2); }
3519673c5308Schenli // CHECK-LABEL: @vssrln_bu_h(
3520673c5308Schenli // CHECK-NEXT:  entry:
3521*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3522*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3523*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrln.bu.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3524*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3525*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3526673c5308Schenli //
vssrln_bu_h(v8u16 _1,v8u16 _2)3527673c5308Schenli v16u8 vssrln_bu_h(v8u16 _1, v8u16 _2) { return __lsx_vssrln_bu_h(_1, _2); }
3528673c5308Schenli // CHECK-LABEL: @vssrln_hu_w(
3529673c5308Schenli // CHECK-NEXT:  entry:
3530*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3531*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3532*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrln.hu.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3533*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3534*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3535673c5308Schenli //
vssrln_hu_w(v4u32 _1,v4u32 _2)3536673c5308Schenli v8u16 vssrln_hu_w(v4u32 _1, v4u32 _2) { return __lsx_vssrln_hu_w(_1, _2); }
3537673c5308Schenli // CHECK-LABEL: @vssrln_wu_d(
3538673c5308Schenli // CHECK-NEXT:  entry:
3539*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3540*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3541*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrln.wu.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3542*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3543*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3544673c5308Schenli //
vssrln_wu_d(v2u64 _1,v2u64 _2)3545673c5308Schenli v4u32 vssrln_wu_d(v2u64 _1, v2u64 _2) { return __lsx_vssrln_wu_d(_1, _2); }
3546673c5308Schenli // CHECK-LABEL: @vsrlrn_b_h(
3547673c5308Schenli // CHECK-NEXT:  entry:
3548*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3549*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3550*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrlrn.b.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3551*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3552*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3553673c5308Schenli //
vsrlrn_b_h(v8i16 _1,v8i16 _2)3554673c5308Schenli v16i8 vsrlrn_b_h(v8i16 _1, v8i16 _2) { return __lsx_vsrlrn_b_h(_1, _2); }
3555673c5308Schenli // CHECK-LABEL: @vsrlrn_h_w(
3556673c5308Schenli // CHECK-NEXT:  entry:
3557*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3558*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3559*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrlrn.h.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3560*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3561*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3562673c5308Schenli //
vsrlrn_h_w(v4i32 _1,v4i32 _2)3563673c5308Schenli v8i16 vsrlrn_h_w(v4i32 _1, v4i32 _2) { return __lsx_vsrlrn_h_w(_1, _2); }
3564673c5308Schenli // CHECK-LABEL: @vsrlrn_w_d(
3565673c5308Schenli // CHECK-NEXT:  entry:
3566*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3567*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3568*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrlrn.w.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3569*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3570*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3571673c5308Schenli //
vsrlrn_w_d(v2i64 _1,v2i64 _2)3572673c5308Schenli v4i32 vsrlrn_w_d(v2i64 _1, v2i64 _2) { return __lsx_vsrlrn_w_d(_1, _2); }
3573673c5308Schenli // CHECK-LABEL: @vssrlrn_bu_h(
3574673c5308Schenli // CHECK-NEXT:  entry:
3575*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3576*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3577*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrlrn.bu.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3578*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3579*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3580673c5308Schenli //
vssrlrn_bu_h(v8u16 _1,v8u16 _2)3581673c5308Schenli v16u8 vssrlrn_bu_h(v8u16 _1, v8u16 _2) { return __lsx_vssrlrn_bu_h(_1, _2); }
3582673c5308Schenli // CHECK-LABEL: @vssrlrn_hu_w(
3583673c5308Schenli // CHECK-NEXT:  entry:
3584*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3585*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3586*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrlrn.hu.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3587*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3588*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3589673c5308Schenli //
vssrlrn_hu_w(v4u32 _1,v4u32 _2)3590673c5308Schenli v8u16 vssrlrn_hu_w(v4u32 _1, v4u32 _2) { return __lsx_vssrlrn_hu_w(_1, _2); }
3591673c5308Schenli // CHECK-LABEL: @vssrlrn_wu_d(
3592673c5308Schenli // CHECK-NEXT:  entry:
3593*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3594*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3595*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrlrn.wu.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3596*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3597*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3598673c5308Schenli //
vssrlrn_wu_d(v2u64 _1,v2u64 _2)3599673c5308Schenli v4u32 vssrlrn_wu_d(v2u64 _1, v2u64 _2) { return __lsx_vssrlrn_wu_d(_1, _2); }
3600673c5308Schenli // CHECK-LABEL: @vfrstpi_b(
3601673c5308Schenli // CHECK-NEXT:  entry:
3602*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3603*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
3604*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vfrstpi.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
3605*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3606*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3607673c5308Schenli //
vfrstpi_b(v16i8 _1,v16i8 _2)3608673c5308Schenli v16i8 vfrstpi_b(v16i8 _1, v16i8 _2) { return __lsx_vfrstpi_b(_1, _2, 1); }
3609673c5308Schenli // CHECK-LABEL: @vfrstpi_h(
3610673c5308Schenli // CHECK-NEXT:  entry:
3611*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3612*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3613*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vfrstpi.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
3614*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3615*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3616673c5308Schenli //
vfrstpi_h(v8i16 _1,v8i16 _2)3617673c5308Schenli v8i16 vfrstpi_h(v8i16 _1, v8i16 _2) { return __lsx_vfrstpi_h(_1, _2, 1); }
3618673c5308Schenli // CHECK-LABEL: @vfrstp_b(
3619673c5308Schenli // CHECK-NEXT:  entry:
3620*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3621*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
3622*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <16 x i8>
3623*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vfrstp.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
3624*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
3625*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
3626673c5308Schenli //
vfrstp_b(v16i8 _1,v16i8 _2,v16i8 _3)3627673c5308Schenli v16i8 vfrstp_b(v16i8 _1, v16i8 _2, v16i8 _3) {
3628673c5308Schenli   return __lsx_vfrstp_b(_1, _2, _3);
3629673c5308Schenli }
3630673c5308Schenli // CHECK-LABEL: @vfrstp_h(
3631673c5308Schenli // CHECK-NEXT:  entry:
3632*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3633*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3634*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <8 x i16>
3635*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vfrstp.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
3636*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
3637*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
3638673c5308Schenli //
vfrstp_h(v8i16 _1,v8i16 _2,v8i16 _3)3639673c5308Schenli v8i16 vfrstp_h(v8i16 _1, v8i16 _2, v8i16 _3) {
3640673c5308Schenli   return __lsx_vfrstp_h(_1, _2, _3);
3641673c5308Schenli }
3642673c5308Schenli // CHECK-LABEL: @vshuf4i_d(
3643673c5308Schenli // CHECK-NEXT:  entry:
3644*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3645*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3646*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vshuf4i.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
3647*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
3648*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3649673c5308Schenli //
vshuf4i_d(v2i64 _1,v2i64 _2)3650673c5308Schenli v2i64 vshuf4i_d(v2i64 _1, v2i64 _2) { return __lsx_vshuf4i_d(_1, _2, 1); }
3651673c5308Schenli // CHECK-LABEL: @vbsrl_v(
3652673c5308Schenli // CHECK-NEXT:  entry:
3653*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3654*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbsrl.v(<16 x i8> [[TMP0]], i32 1)
3655*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
3656*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3657673c5308Schenli //
vbsrl_v(v16i8 _1)3658673c5308Schenli v16i8 vbsrl_v(v16i8 _1) { return __lsx_vbsrl_v(_1, 1); }
3659673c5308Schenli // CHECK-LABEL: @vbsll_v(
3660673c5308Schenli // CHECK-NEXT:  entry:
3661*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3662*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbsll.v(<16 x i8> [[TMP0]], i32 1)
3663*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
3664*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3665673c5308Schenli //
vbsll_v(v16i8 _1)3666673c5308Schenli v16i8 vbsll_v(v16i8 _1) { return __lsx_vbsll_v(_1, 1); }
3667673c5308Schenli // CHECK-LABEL: @vextrins_b(
3668673c5308Schenli // CHECK-NEXT:  entry:
3669*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3670*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
3671*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vextrins.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
3672*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3673*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3674673c5308Schenli //
vextrins_b(v16i8 _1,v16i8 _2)3675673c5308Schenli v16i8 vextrins_b(v16i8 _1, v16i8 _2) { return __lsx_vextrins_b(_1, _2, 1); }
3676673c5308Schenli // CHECK-LABEL: @vextrins_h(
3677673c5308Schenli // CHECK-NEXT:  entry:
3678*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3679*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3680*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vextrins.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
3681*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3682*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3683673c5308Schenli //
vextrins_h(v8i16 _1,v8i16 _2)3684673c5308Schenli v8i16 vextrins_h(v8i16 _1, v8i16 _2) { return __lsx_vextrins_h(_1, _2, 1); }
3685673c5308Schenli // CHECK-LABEL: @vextrins_w(
3686673c5308Schenli // CHECK-NEXT:  entry:
3687*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3688*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3689*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vextrins.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
3690*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3691*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3692673c5308Schenli //
vextrins_w(v4i32 _1,v4i32 _2)3693673c5308Schenli v4i32 vextrins_w(v4i32 _1, v4i32 _2) { return __lsx_vextrins_w(_1, _2, 1); }
3694673c5308Schenli // CHECK-LABEL: @vextrins_d(
3695673c5308Schenli // CHECK-NEXT:  entry:
3696*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3697*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3698*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vextrins.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
3699*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
3700*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3701673c5308Schenli //
vextrins_d(v2i64 _1,v2i64 _2)3702673c5308Schenli v2i64 vextrins_d(v2i64 _1, v2i64 _2) { return __lsx_vextrins_d(_1, _2, 1); }
3703673c5308Schenli // CHECK-LABEL: @vmskltz_b(
3704673c5308Schenli // CHECK-NEXT:  entry:
3705*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3706*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmskltz.b(<16 x i8> [[TMP0]])
3707*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
3708*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3709673c5308Schenli //
vmskltz_b(v16i8 _1)3710673c5308Schenli v16i8 vmskltz_b(v16i8 _1) { return __lsx_vmskltz_b(_1); }
3711673c5308Schenli // CHECK-LABEL: @vmskltz_h(
3712673c5308Schenli // CHECK-NEXT:  entry:
3713*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3714*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmskltz.h(<8 x i16> [[TMP0]])
3715*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
3716*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3717673c5308Schenli //
vmskltz_h(v8i16 _1)3718673c5308Schenli v8i16 vmskltz_h(v8i16 _1) { return __lsx_vmskltz_h(_1); }
3719673c5308Schenli // CHECK-LABEL: @vmskltz_w(
3720673c5308Schenli // CHECK-NEXT:  entry:
3721*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3722*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmskltz.w(<4 x i32> [[TMP0]])
3723*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
3724*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3725673c5308Schenli //
vmskltz_w(v4i32 _1)3726673c5308Schenli v4i32 vmskltz_w(v4i32 _1) { return __lsx_vmskltz_w(_1); }
3727673c5308Schenli // CHECK-LABEL: @vmskltz_d(
3728673c5308Schenli // CHECK-NEXT:  entry:
3729*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3730*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmskltz.d(<2 x i64> [[TMP0]])
3731*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3732*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3733673c5308Schenli //
vmskltz_d(v2i64 _1)3734673c5308Schenli v2i64 vmskltz_d(v2i64 _1) { return __lsx_vmskltz_d(_1); }
3735673c5308Schenli // CHECK-LABEL: @vsigncov_b(
3736673c5308Schenli // CHECK-NEXT:  entry:
3737*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
3738*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
3739*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsigncov.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
3740*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3741*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3742673c5308Schenli //
vsigncov_b(v16i8 _1,v16i8 _2)3743673c5308Schenli v16i8 vsigncov_b(v16i8 _1, v16i8 _2) { return __lsx_vsigncov_b(_1, _2); }
3744673c5308Schenli // CHECK-LABEL: @vsigncov_h(
3745673c5308Schenli // CHECK-NEXT:  entry:
3746*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
3747*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
3748*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsigncov.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
3749*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
3750*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3751673c5308Schenli //
vsigncov_h(v8i16 _1,v8i16 _2)3752673c5308Schenli v8i16 vsigncov_h(v8i16 _1, v8i16 _2) { return __lsx_vsigncov_h(_1, _2); }
3753673c5308Schenli // CHECK-LABEL: @vsigncov_w(
3754673c5308Schenli // CHECK-NEXT:  entry:
3755*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3756*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
3757*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsigncov.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
3758*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3759*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3760673c5308Schenli //
vsigncov_w(v4i32 _1,v4i32 _2)3761673c5308Schenli v4i32 vsigncov_w(v4i32 _1, v4i32 _2) { return __lsx_vsigncov_w(_1, _2); }
3762673c5308Schenli // CHECK-LABEL: @vsigncov_d(
3763673c5308Schenli // CHECK-NEXT:  entry:
3764*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3765*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3766*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsigncov.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3767*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
3768*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3769673c5308Schenli //
vsigncov_d(v2i64 _1,v2i64 _2)3770673c5308Schenli v2i64 vsigncov_d(v2i64 _1, v2i64 _2) { return __lsx_vsigncov_d(_1, _2); }
3771673c5308Schenli // CHECK-LABEL: @vfmadd_s(
3772673c5308Schenli // CHECK-NEXT:  entry:
3773*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3774*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
3775*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x float>
3776*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmadd.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]], <4 x float> [[TMP2]])
3777*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x float> [[TMP3]] to i128
3778*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
3779673c5308Schenli //
vfmadd_s(v4f32 _1,v4f32 _2,v4f32 _3)3780673c5308Schenli v4f32 vfmadd_s(v4f32 _1, v4f32 _2, v4f32 _3) {
3781673c5308Schenli   return __lsx_vfmadd_s(_1, _2, _3);
3782673c5308Schenli }
3783673c5308Schenli // CHECK-LABEL: @vfmadd_d(
3784673c5308Schenli // CHECK-NEXT:  entry:
3785*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3786*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
3787*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x double>
3788*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmadd.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]], <2 x double> [[TMP2]])
3789*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x double> [[TMP3]] to i128
3790*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
3791673c5308Schenli //
vfmadd_d(v2f64 _1,v2f64 _2,v2f64 _3)3792673c5308Schenli v2f64 vfmadd_d(v2f64 _1, v2f64 _2, v2f64 _3) {
3793673c5308Schenli   return __lsx_vfmadd_d(_1, _2, _3);
3794673c5308Schenli }
3795673c5308Schenli // CHECK-LABEL: @vfmsub_s(
3796673c5308Schenli // CHECK-NEXT:  entry:
3797*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3798*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
3799*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x float>
3800*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmsub.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]], <4 x float> [[TMP2]])
3801*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x float> [[TMP3]] to i128
3802*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
3803673c5308Schenli //
vfmsub_s(v4f32 _1,v4f32 _2,v4f32 _3)3804673c5308Schenli v4f32 vfmsub_s(v4f32 _1, v4f32 _2, v4f32 _3) {
3805673c5308Schenli   return __lsx_vfmsub_s(_1, _2, _3);
3806673c5308Schenli }
3807673c5308Schenli // CHECK-LABEL: @vfmsub_d(
3808673c5308Schenli // CHECK-NEXT:  entry:
3809*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3810*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
3811*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x double>
3812*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmsub.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]], <2 x double> [[TMP2]])
3813*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x double> [[TMP3]] to i128
3814*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
3815673c5308Schenli //
vfmsub_d(v2f64 _1,v2f64 _2,v2f64 _3)3816673c5308Schenli v2f64 vfmsub_d(v2f64 _1, v2f64 _2, v2f64 _3) {
3817673c5308Schenli   return __lsx_vfmsub_d(_1, _2, _3);
3818673c5308Schenli }
3819673c5308Schenli // CHECK-LABEL: @vfnmadd_s(
3820673c5308Schenli // CHECK-NEXT:  entry:
3821*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3822*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
3823*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x float>
3824*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfnmadd.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]], <4 x float> [[TMP2]])
3825*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x float> [[TMP3]] to i128
3826*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
3827673c5308Schenli //
vfnmadd_s(v4f32 _1,v4f32 _2,v4f32 _3)3828673c5308Schenli v4f32 vfnmadd_s(v4f32 _1, v4f32 _2, v4f32 _3) {
3829673c5308Schenli   return __lsx_vfnmadd_s(_1, _2, _3);
3830673c5308Schenli }
3831673c5308Schenli // CHECK-LABEL: @vfnmadd_d(
3832673c5308Schenli // CHECK-NEXT:  entry:
3833*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3834*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
3835*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x double>
3836*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfnmadd.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]], <2 x double> [[TMP2]])
3837*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x double> [[TMP3]] to i128
3838*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
3839673c5308Schenli //
vfnmadd_d(v2f64 _1,v2f64 _2,v2f64 _3)3840673c5308Schenli v2f64 vfnmadd_d(v2f64 _1, v2f64 _2, v2f64 _3) {
3841673c5308Schenli   return __lsx_vfnmadd_d(_1, _2, _3);
3842673c5308Schenli }
3843673c5308Schenli // CHECK-LABEL: @vfnmsub_s(
3844673c5308Schenli // CHECK-NEXT:  entry:
3845*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3846*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
3847*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x float>
3848*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfnmsub.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]], <4 x float> [[TMP2]])
3849*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x float> [[TMP3]] to i128
3850*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
3851673c5308Schenli //
vfnmsub_s(v4f32 _1,v4f32 _2,v4f32 _3)3852673c5308Schenli v4f32 vfnmsub_s(v4f32 _1, v4f32 _2, v4f32 _3) {
3853673c5308Schenli   return __lsx_vfnmsub_s(_1, _2, _3);
3854673c5308Schenli }
3855673c5308Schenli // CHECK-LABEL: @vfnmsub_d(
3856673c5308Schenli // CHECK-NEXT:  entry:
3857*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3858*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
3859*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x double>
3860*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfnmsub.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]], <2 x double> [[TMP2]])
3861*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x double> [[TMP3]] to i128
3862*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
3863673c5308Schenli //
vfnmsub_d(v2f64 _1,v2f64 _2,v2f64 _3)3864673c5308Schenli v2f64 vfnmsub_d(v2f64 _1, v2f64 _2, v2f64 _3) {
3865673c5308Schenli   return __lsx_vfnmsub_d(_1, _2, _3);
3866673c5308Schenli }
3867673c5308Schenli // CHECK-LABEL: @vftintrne_w_s(
3868673c5308Schenli // CHECK-NEXT:  entry:
3869*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3870*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrne.w.s(<4 x float> [[TMP0]])
3871*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
3872*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3873673c5308Schenli //
vftintrne_w_s(v4f32 _1)3874673c5308Schenli v4i32 vftintrne_w_s(v4f32 _1) { return __lsx_vftintrne_w_s(_1); }
3875673c5308Schenli // CHECK-LABEL: @vftintrne_l_d(
3876673c5308Schenli // CHECK-NEXT:  entry:
3877*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3878*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrne.l.d(<2 x double> [[TMP0]])
3879*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3880*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3881673c5308Schenli //
vftintrne_l_d(v2f64 _1)3882673c5308Schenli v2i64 vftintrne_l_d(v2f64 _1) { return __lsx_vftintrne_l_d(_1); }
3883673c5308Schenli // CHECK-LABEL: @vftintrp_w_s(
3884673c5308Schenli // CHECK-NEXT:  entry:
3885*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3886*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrp.w.s(<4 x float> [[TMP0]])
3887*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
3888*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3889673c5308Schenli //
vftintrp_w_s(v4f32 _1)3890673c5308Schenli v4i32 vftintrp_w_s(v4f32 _1) { return __lsx_vftintrp_w_s(_1); }
3891673c5308Schenli // CHECK-LABEL: @vftintrp_l_d(
3892673c5308Schenli // CHECK-NEXT:  entry:
3893*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3894*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrp.l.d(<2 x double> [[TMP0]])
3895*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3896*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3897673c5308Schenli //
vftintrp_l_d(v2f64 _1)3898673c5308Schenli v2i64 vftintrp_l_d(v2f64 _1) { return __lsx_vftintrp_l_d(_1); }
3899673c5308Schenli // CHECK-LABEL: @vftintrm_w_s(
3900673c5308Schenli // CHECK-NEXT:  entry:
3901*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3902*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrm.w.s(<4 x float> [[TMP0]])
3903*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
3904*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3905673c5308Schenli //
vftintrm_w_s(v4f32 _1)3906673c5308Schenli v4i32 vftintrm_w_s(v4f32 _1) { return __lsx_vftintrm_w_s(_1); }
3907673c5308Schenli // CHECK-LABEL: @vftintrm_l_d(
3908673c5308Schenli // CHECK-NEXT:  entry:
3909*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3910*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrm.l.d(<2 x double> [[TMP0]])
3911*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3912*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3913673c5308Schenli //
vftintrm_l_d(v2f64 _1)3914673c5308Schenli v2i64 vftintrm_l_d(v2f64 _1) { return __lsx_vftintrm_l_d(_1); }
3915673c5308Schenli // CHECK-LABEL: @vftint_w_d(
3916673c5308Schenli // CHECK-NEXT:  entry:
3917*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3918*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
3919*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftint.w.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
3920*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3921*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3922673c5308Schenli //
vftint_w_d(v2f64 _1,v2f64 _2)3923673c5308Schenli v4i32 vftint_w_d(v2f64 _1, v2f64 _2) { return __lsx_vftint_w_d(_1, _2); }
3924673c5308Schenli // CHECK-LABEL: @vffint_s_l(
3925673c5308Schenli // CHECK-NEXT:  entry:
3926*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
3927*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
3928*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vffint.s.l(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
3929*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x float> [[TMP2]] to i128
3930*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3931673c5308Schenli //
vffint_s_l(v2i64 _1,v2i64 _2)3932673c5308Schenli v4f32 vffint_s_l(v2i64 _1, v2i64 _2) { return __lsx_vffint_s_l(_1, _2); }
3933673c5308Schenli // CHECK-LABEL: @vftintrz_w_d(
3934673c5308Schenli // CHECK-NEXT:  entry:
3935*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3936*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
3937*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrz.w.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
3938*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3939*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3940673c5308Schenli //
vftintrz_w_d(v2f64 _1,v2f64 _2)3941673c5308Schenli v4i32 vftintrz_w_d(v2f64 _1, v2f64 _2) { return __lsx_vftintrz_w_d(_1, _2); }
3942673c5308Schenli // CHECK-LABEL: @vftintrp_w_d(
3943673c5308Schenli // CHECK-NEXT:  entry:
3944*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3945*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
3946*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrp.w.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
3947*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3948*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3949673c5308Schenli //
vftintrp_w_d(v2f64 _1,v2f64 _2)3950673c5308Schenli v4i32 vftintrp_w_d(v2f64 _1, v2f64 _2) { return __lsx_vftintrp_w_d(_1, _2); }
3951673c5308Schenli // CHECK-LABEL: @vftintrm_w_d(
3952673c5308Schenli // CHECK-NEXT:  entry:
3953*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3954*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
3955*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrm.w.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
3956*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3957*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3958673c5308Schenli //
vftintrm_w_d(v2f64 _1,v2f64 _2)3959673c5308Schenli v4i32 vftintrm_w_d(v2f64 _1, v2f64 _2) { return __lsx_vftintrm_w_d(_1, _2); }
3960673c5308Schenli // CHECK-LABEL: @vftintrne_w_d(
3961673c5308Schenli // CHECK-NEXT:  entry:
3962*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
3963*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
3964*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrne.w.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
3965*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
3966*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
3967673c5308Schenli //
vftintrne_w_d(v2f64 _1,v2f64 _2)3968673c5308Schenli v4i32 vftintrne_w_d(v2f64 _1, v2f64 _2) { return __lsx_vftintrne_w_d(_1, _2); }
3969673c5308Schenli // CHECK-LABEL: @vftintl_l_s(
3970673c5308Schenli // CHECK-NEXT:  entry:
3971*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3972*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintl.l.s(<4 x float> [[TMP0]])
3973*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3974*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3975673c5308Schenli //
vftintl_l_s(v4f32 _1)3976673c5308Schenli v2i64 vftintl_l_s(v4f32 _1) { return __lsx_vftintl_l_s(_1); }
3977673c5308Schenli // CHECK-LABEL: @vftinth_l_s(
3978673c5308Schenli // CHECK-NEXT:  entry:
3979*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
3980*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftinth.l.s(<4 x float> [[TMP0]])
3981*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
3982*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3983673c5308Schenli //
vftinth_l_s(v4f32 _1)3984673c5308Schenli v2i64 vftinth_l_s(v4f32 _1) { return __lsx_vftinth_l_s(_1); }
3985673c5308Schenli // CHECK-LABEL: @vffinth_d_w(
3986673c5308Schenli // CHECK-NEXT:  entry:
3987*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3988*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vffinth.d.w(<4 x i32> [[TMP0]])
3989*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
3990*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3991673c5308Schenli //
vffinth_d_w(v4i32 _1)3992673c5308Schenli v2f64 vffinth_d_w(v4i32 _1) { return __lsx_vffinth_d_w(_1); }
3993673c5308Schenli // CHECK-LABEL: @vffintl_d_w(
3994673c5308Schenli // CHECK-NEXT:  entry:
3995*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
3996*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vffintl.d.w(<4 x i32> [[TMP0]])
3997*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
3998*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
3999673c5308Schenli //
vffintl_d_w(v4i32 _1)4000673c5308Schenli v2f64 vffintl_d_w(v4i32 _1) { return __lsx_vffintl_d_w(_1); }
4001673c5308Schenli // CHECK-LABEL: @vftintrzl_l_s(
4002673c5308Schenli // CHECK-NEXT:  entry:
4003*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
4004*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrzl.l.s(<4 x float> [[TMP0]])
4005*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
4006*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4007673c5308Schenli //
vftintrzl_l_s(v4f32 _1)4008673c5308Schenli v2i64 vftintrzl_l_s(v4f32 _1) { return __lsx_vftintrzl_l_s(_1); }
4009673c5308Schenli // CHECK-LABEL: @vftintrzh_l_s(
4010673c5308Schenli // CHECK-NEXT:  entry:
4011*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
4012*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrzh.l.s(<4 x float> [[TMP0]])
4013*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
4014*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4015673c5308Schenli //
vftintrzh_l_s(v4f32 _1)4016673c5308Schenli v2i64 vftintrzh_l_s(v4f32 _1) { return __lsx_vftintrzh_l_s(_1); }
4017673c5308Schenli // CHECK-LABEL: @vftintrpl_l_s(
4018673c5308Schenli // CHECK-NEXT:  entry:
4019*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
4020*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrpl.l.s(<4 x float> [[TMP0]])
4021*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
4022*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4023673c5308Schenli //
vftintrpl_l_s(v4f32 _1)4024673c5308Schenli v2i64 vftintrpl_l_s(v4f32 _1) { return __lsx_vftintrpl_l_s(_1); }
4025673c5308Schenli // CHECK-LABEL: @vftintrph_l_s(
4026673c5308Schenli // CHECK-NEXT:  entry:
4027*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
4028*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrph.l.s(<4 x float> [[TMP0]])
4029*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
4030*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4031673c5308Schenli //
vftintrph_l_s(v4f32 _1)4032673c5308Schenli v2i64 vftintrph_l_s(v4f32 _1) { return __lsx_vftintrph_l_s(_1); }
4033673c5308Schenli // CHECK-LABEL: @vftintrml_l_s(
4034673c5308Schenli // CHECK-NEXT:  entry:
4035*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
4036*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrml.l.s(<4 x float> [[TMP0]])
4037*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
4038*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4039673c5308Schenli //
vftintrml_l_s(v4f32 _1)4040673c5308Schenli v2i64 vftintrml_l_s(v4f32 _1) { return __lsx_vftintrml_l_s(_1); }
4041673c5308Schenli // CHECK-LABEL: @vftintrmh_l_s(
4042673c5308Schenli // CHECK-NEXT:  entry:
4043*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
4044*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrmh.l.s(<4 x float> [[TMP0]])
4045*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
4046*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4047673c5308Schenli //
vftintrmh_l_s(v4f32 _1)4048673c5308Schenli v2i64 vftintrmh_l_s(v4f32 _1) { return __lsx_vftintrmh_l_s(_1); }
4049673c5308Schenli // CHECK-LABEL: @vftintrnel_l_s(
4050673c5308Schenli // CHECK-NEXT:  entry:
4051*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
4052*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrnel.l.s(<4 x float> [[TMP0]])
4053*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
4054*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4055673c5308Schenli //
vftintrnel_l_s(v4f32 _1)4056673c5308Schenli v2i64 vftintrnel_l_s(v4f32 _1) { return __lsx_vftintrnel_l_s(_1); }
4057673c5308Schenli // CHECK-LABEL: @vftintrneh_l_s(
4058673c5308Schenli // CHECK-NEXT:  entry:
4059*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
4060*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrneh.l.s(<4 x float> [[TMP0]])
4061*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
4062*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4063673c5308Schenli //
vftintrneh_l_s(v4f32 _1)4064673c5308Schenli v2i64 vftintrneh_l_s(v4f32 _1) { return __lsx_vftintrneh_l_s(_1); }
4065673c5308Schenli // CHECK-LABEL: @vfrintrne_s(
4066673c5308Schenli // CHECK-NEXT:  entry:
4067*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
4068*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrintrne.s(<4 x float> [[TMP0]])
4069*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
4070*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4071673c5308Schenli //
vfrintrne_s(v4f32 _1)4072673c5308Schenli v4i32 vfrintrne_s(v4f32 _1) { return __lsx_vfrintrne_s(_1); }
4073673c5308Schenli // CHECK-LABEL: @vfrintrne_d(
4074673c5308Schenli // CHECK-NEXT:  entry:
4075*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
4076*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrintrne.d(<2 x double> [[TMP0]])
4077*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
4078*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4079673c5308Schenli //
vfrintrne_d(v2f64 _1)4080673c5308Schenli v2i64 vfrintrne_d(v2f64 _1) { return __lsx_vfrintrne_d(_1); }
4081673c5308Schenli // CHECK-LABEL: @vfrintrz_s(
4082673c5308Schenli // CHECK-NEXT:  entry:
4083*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
4084*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrintrz.s(<4 x float> [[TMP0]])
4085*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
4086*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4087673c5308Schenli //
vfrintrz_s(v4f32 _1)4088673c5308Schenli v4i32 vfrintrz_s(v4f32 _1) { return __lsx_vfrintrz_s(_1); }
4089673c5308Schenli // CHECK-LABEL: @vfrintrz_d(
4090673c5308Schenli // CHECK-NEXT:  entry:
4091*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
4092*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrintrz.d(<2 x double> [[TMP0]])
4093*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
4094*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4095673c5308Schenli //
vfrintrz_d(v2f64 _1)4096673c5308Schenli v2i64 vfrintrz_d(v2f64 _1) { return __lsx_vfrintrz_d(_1); }
4097673c5308Schenli // CHECK-LABEL: @vfrintrp_s(
4098673c5308Schenli // CHECK-NEXT:  entry:
4099*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
4100*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrintrp.s(<4 x float> [[TMP0]])
4101*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
4102*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4103673c5308Schenli //
vfrintrp_s(v4f32 _1)4104673c5308Schenli v4i32 vfrintrp_s(v4f32 _1) { return __lsx_vfrintrp_s(_1); }
4105673c5308Schenli // CHECK-LABEL: @vfrintrp_d(
4106673c5308Schenli // CHECK-NEXT:  entry:
4107*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
4108*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrintrp.d(<2 x double> [[TMP0]])
4109*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
4110*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4111673c5308Schenli //
vfrintrp_d(v2f64 _1)4112673c5308Schenli v2i64 vfrintrp_d(v2f64 _1) { return __lsx_vfrintrp_d(_1); }
4113673c5308Schenli // CHECK-LABEL: @vfrintrm_s(
4114673c5308Schenli // CHECK-NEXT:  entry:
4115*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
4116*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrintrm.s(<4 x float> [[TMP0]])
4117*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to i128
4118*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4119673c5308Schenli //
vfrintrm_s(v4f32 _1)4120673c5308Schenli v4i32 vfrintrm_s(v4f32 _1) { return __lsx_vfrintrm_s(_1); }
4121673c5308Schenli // CHECK-LABEL: @vfrintrm_d(
4122673c5308Schenli // CHECK-NEXT:  entry:
4123*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
4124*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrintrm.d(<2 x double> [[TMP0]])
4125*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x double> [[TMP1]] to i128
4126*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
4127673c5308Schenli //
vfrintrm_d(v2f64 _1)4128673c5308Schenli v2i64 vfrintrm_d(v2f64 _1) { return __lsx_vfrintrm_d(_1); }
4129673c5308Schenli // CHECK-LABEL: @vstelm_b(
4130673c5308Schenli // CHECK-NEXT:  entry:
4131*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4132*0e01c72cSyjijd // CHECK-NEXT:    tail call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> [[TMP0]], ptr [[_2:%.*]], i32 1, i32 1)
4133673c5308Schenli // CHECK-NEXT:    ret void
4134673c5308Schenli //
vstelm_b(v16i8 _1,void * _2)4135673c5308Schenli void vstelm_b(v16i8 _1, void *_2) { return __lsx_vstelm_b(_1, _2, 1, 1); }
4136673c5308Schenli // CHECK-LABEL: @vstelm_h(
4137673c5308Schenli // CHECK-NEXT:  entry:
4138*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4139*0e01c72cSyjijd // CHECK-NEXT:    tail call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> [[TMP0]], ptr [[_2:%.*]], i32 2, i32 1)
4140673c5308Schenli // CHECK-NEXT:    ret void
4141673c5308Schenli //
vstelm_h(v8i16 _1,void * _2)4142673c5308Schenli void vstelm_h(v8i16 _1, void *_2) { return __lsx_vstelm_h(_1, _2, 2, 1); }
4143673c5308Schenli // CHECK-LABEL: @vstelm_w(
4144673c5308Schenli // CHECK-NEXT:  entry:
4145*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4146*0e01c72cSyjijd // CHECK-NEXT:    tail call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> [[TMP0]], ptr [[_2:%.*]], i32 4, i32 1)
4147673c5308Schenli // CHECK-NEXT:    ret void
4148673c5308Schenli //
vstelm_w(v4i32 _1,void * _2)4149673c5308Schenli void vstelm_w(v4i32 _1, void *_2) { return __lsx_vstelm_w(_1, _2, 4, 1); }
4150673c5308Schenli // CHECK-LABEL: @vstelm_d(
4151673c5308Schenli // CHECK-NEXT:  entry:
4152*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4153*0e01c72cSyjijd // CHECK-NEXT:    tail call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> [[TMP0]], ptr [[_2:%.*]], i32 8, i32 1)
4154673c5308Schenli // CHECK-NEXT:    ret void
4155673c5308Schenli //
vstelm_d(v2i64 _1,void * _2)4156673c5308Schenli void vstelm_d(v2i64 _1, void *_2) { return __lsx_vstelm_d(_1, _2, 8, 1); }
4157673c5308Schenli // CHECK-LABEL: @vaddwev_d_w(
4158673c5308Schenli // CHECK-NEXT:  entry:
4159*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4160*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4161*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwev.d.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4162*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4163*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4164673c5308Schenli //
vaddwev_d_w(v4i32 _1,v4i32 _2)4165673c5308Schenli v2i64 vaddwev_d_w(v4i32 _1, v4i32 _2) { return __lsx_vaddwev_d_w(_1, _2); }
4166673c5308Schenli // CHECK-LABEL: @vaddwev_w_h(
4167673c5308Schenli // CHECK-NEXT:  entry:
4168*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4169*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4170*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddwev.w.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4171*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4172*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4173673c5308Schenli //
vaddwev_w_h(v8i16 _1,v8i16 _2)4174673c5308Schenli v4i32 vaddwev_w_h(v8i16 _1, v8i16 _2) { return __lsx_vaddwev_w_h(_1, _2); }
4175673c5308Schenli // CHECK-LABEL: @vaddwev_h_b(
4176673c5308Schenli // CHECK-NEXT:  entry:
4177*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4178*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4179*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddwev.h.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4180*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4181*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4182673c5308Schenli //
vaddwev_h_b(v16i8 _1,v16i8 _2)4183673c5308Schenli v8i16 vaddwev_h_b(v16i8 _1, v16i8 _2) { return __lsx_vaddwev_h_b(_1, _2); }
4184673c5308Schenli // CHECK-LABEL: @vaddwod_d_w(
4185673c5308Schenli // CHECK-NEXT:  entry:
4186*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4187*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4188*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwod.d.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4189*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4190*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4191673c5308Schenli //
vaddwod_d_w(v4i32 _1,v4i32 _2)4192673c5308Schenli v2i64 vaddwod_d_w(v4i32 _1, v4i32 _2) { return __lsx_vaddwod_d_w(_1, _2); }
4193673c5308Schenli // CHECK-LABEL: @vaddwod_w_h(
4194673c5308Schenli // CHECK-NEXT:  entry:
4195*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4196*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4197*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddwod.w.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4198*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4199*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4200673c5308Schenli //
vaddwod_w_h(v8i16 _1,v8i16 _2)4201673c5308Schenli v4i32 vaddwod_w_h(v8i16 _1, v8i16 _2) { return __lsx_vaddwod_w_h(_1, _2); }
4202673c5308Schenli // CHECK-LABEL: @vaddwod_h_b(
4203673c5308Schenli // CHECK-NEXT:  entry:
4204*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4205*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4206*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddwod.h.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4207*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4208*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4209673c5308Schenli //
vaddwod_h_b(v16i8 _1,v16i8 _2)4210673c5308Schenli v8i16 vaddwod_h_b(v16i8 _1, v16i8 _2) { return __lsx_vaddwod_h_b(_1, _2); }
4211673c5308Schenli // CHECK-LABEL: @vaddwev_d_wu(
4212673c5308Schenli // CHECK-NEXT:  entry:
4213*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4214*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4215*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwev.d.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4216*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4217*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4218673c5308Schenli //
vaddwev_d_wu(v4u32 _1,v4u32 _2)4219673c5308Schenli v2i64 vaddwev_d_wu(v4u32 _1, v4u32 _2) { return __lsx_vaddwev_d_wu(_1, _2); }
4220673c5308Schenli // CHECK-LABEL: @vaddwev_w_hu(
4221673c5308Schenli // CHECK-NEXT:  entry:
4222*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4223*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4224*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddwev.w.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4225*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4226*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4227673c5308Schenli //
vaddwev_w_hu(v8u16 _1,v8u16 _2)4228673c5308Schenli v4i32 vaddwev_w_hu(v8u16 _1, v8u16 _2) { return __lsx_vaddwev_w_hu(_1, _2); }
4229673c5308Schenli // CHECK-LABEL: @vaddwev_h_bu(
4230673c5308Schenli // CHECK-NEXT:  entry:
4231*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4232*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4233*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddwev.h.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4234*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4235*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4236673c5308Schenli //
vaddwev_h_bu(v16u8 _1,v16u8 _2)4237673c5308Schenli v8i16 vaddwev_h_bu(v16u8 _1, v16u8 _2) { return __lsx_vaddwev_h_bu(_1, _2); }
4238673c5308Schenli // CHECK-LABEL: @vaddwod_d_wu(
4239673c5308Schenli // CHECK-NEXT:  entry:
4240*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4241*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4242*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwod.d.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4243*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4244*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4245673c5308Schenli //
vaddwod_d_wu(v4u32 _1,v4u32 _2)4246673c5308Schenli v2i64 vaddwod_d_wu(v4u32 _1, v4u32 _2) { return __lsx_vaddwod_d_wu(_1, _2); }
4247673c5308Schenli // CHECK-LABEL: @vaddwod_w_hu(
4248673c5308Schenli // CHECK-NEXT:  entry:
4249*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4250*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4251*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddwod.w.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4252*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4253*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4254673c5308Schenli //
vaddwod_w_hu(v8u16 _1,v8u16 _2)4255673c5308Schenli v4i32 vaddwod_w_hu(v8u16 _1, v8u16 _2) { return __lsx_vaddwod_w_hu(_1, _2); }
4256673c5308Schenli // CHECK-LABEL: @vaddwod_h_bu(
4257673c5308Schenli // CHECK-NEXT:  entry:
4258*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4259*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4260*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddwod.h.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4261*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4262*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4263673c5308Schenli //
vaddwod_h_bu(v16u8 _1,v16u8 _2)4264673c5308Schenli v8i16 vaddwod_h_bu(v16u8 _1, v16u8 _2) { return __lsx_vaddwod_h_bu(_1, _2); }
4265673c5308Schenli // CHECK-LABEL: @vaddwev_d_wu_w(
4266673c5308Schenli // CHECK-NEXT:  entry:
4267*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4268*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4269*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwev.d.wu.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4270*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4271*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4272673c5308Schenli //
vaddwev_d_wu_w(v4u32 _1,v4i32 _2)4273673c5308Schenli v2i64 vaddwev_d_wu_w(v4u32 _1, v4i32 _2) {
4274673c5308Schenli   return __lsx_vaddwev_d_wu_w(_1, _2);
4275673c5308Schenli }
4276673c5308Schenli // CHECK-LABEL: @vaddwev_w_hu_h(
4277673c5308Schenli // CHECK-NEXT:  entry:
4278*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4279*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4280*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddwev.w.hu.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4281*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4282*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4283673c5308Schenli //
vaddwev_w_hu_h(v8u16 _1,v8i16 _2)4284673c5308Schenli v4i32 vaddwev_w_hu_h(v8u16 _1, v8i16 _2) {
4285673c5308Schenli   return __lsx_vaddwev_w_hu_h(_1, _2);
4286673c5308Schenli }
4287673c5308Schenli // CHECK-LABEL: @vaddwev_h_bu_b(
4288673c5308Schenli // CHECK-NEXT:  entry:
4289*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4290*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4291*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddwev.h.bu.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4292*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4293*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4294673c5308Schenli //
vaddwev_h_bu_b(v16u8 _1,v16i8 _2)4295673c5308Schenli v8i16 vaddwev_h_bu_b(v16u8 _1, v16i8 _2) {
4296673c5308Schenli   return __lsx_vaddwev_h_bu_b(_1, _2);
4297673c5308Schenli }
4298673c5308Schenli // CHECK-LABEL: @vaddwod_d_wu_w(
4299673c5308Schenli // CHECK-NEXT:  entry:
4300*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4301*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4302*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwod.d.wu.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4303*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4304*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4305673c5308Schenli //
vaddwod_d_wu_w(v4u32 _1,v4i32 _2)4306673c5308Schenli v2i64 vaddwod_d_wu_w(v4u32 _1, v4i32 _2) {
4307673c5308Schenli   return __lsx_vaddwod_d_wu_w(_1, _2);
4308673c5308Schenli }
4309673c5308Schenli // CHECK-LABEL: @vaddwod_w_hu_h(
4310673c5308Schenli // CHECK-NEXT:  entry:
4311*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4312*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4313*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddwod.w.hu.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4314*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4315*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4316673c5308Schenli //
vaddwod_w_hu_h(v8u16 _1,v8i16 _2)4317673c5308Schenli v4i32 vaddwod_w_hu_h(v8u16 _1, v8i16 _2) {
4318673c5308Schenli   return __lsx_vaddwod_w_hu_h(_1, _2);
4319673c5308Schenli }
4320673c5308Schenli // CHECK-LABEL: @vaddwod_h_bu_b(
4321673c5308Schenli // CHECK-NEXT:  entry:
4322*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4323*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4324*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddwod.h.bu.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4325*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4326*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4327673c5308Schenli //
vaddwod_h_bu_b(v16u8 _1,v16i8 _2)4328673c5308Schenli v8i16 vaddwod_h_bu_b(v16u8 _1, v16i8 _2) {
4329673c5308Schenli   return __lsx_vaddwod_h_bu_b(_1, _2);
4330673c5308Schenli }
4331673c5308Schenli // CHECK-LABEL: @vsubwev_d_w(
4332673c5308Schenli // CHECK-NEXT:  entry:
4333*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4334*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4335*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwev.d.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4336*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4337*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4338673c5308Schenli //
vsubwev_d_w(v4i32 _1,v4i32 _2)4339673c5308Schenli v2i64 vsubwev_d_w(v4i32 _1, v4i32 _2) { return __lsx_vsubwev_d_w(_1, _2); }
4340673c5308Schenli // CHECK-LABEL: @vsubwev_w_h(
4341673c5308Schenli // CHECK-NEXT:  entry:
4342*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4343*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4344*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsubwev.w.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4345*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4346*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4347673c5308Schenli //
vsubwev_w_h(v8i16 _1,v8i16 _2)4348673c5308Schenli v4i32 vsubwev_w_h(v8i16 _1, v8i16 _2) { return __lsx_vsubwev_w_h(_1, _2); }
4349673c5308Schenli // CHECK-LABEL: @vsubwev_h_b(
4350673c5308Schenli // CHECK-NEXT:  entry:
4351*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4352*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4353*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsubwev.h.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4354*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4355*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4356673c5308Schenli //
vsubwev_h_b(v16i8 _1,v16i8 _2)4357673c5308Schenli v8i16 vsubwev_h_b(v16i8 _1, v16i8 _2) { return __lsx_vsubwev_h_b(_1, _2); }
4358673c5308Schenli // CHECK-LABEL: @vsubwod_d_w(
4359673c5308Schenli // CHECK-NEXT:  entry:
4360*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4361*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4362*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwod.d.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4363*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4364*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4365673c5308Schenli //
vsubwod_d_w(v4i32 _1,v4i32 _2)4366673c5308Schenli v2i64 vsubwod_d_w(v4i32 _1, v4i32 _2) { return __lsx_vsubwod_d_w(_1, _2); }
4367673c5308Schenli // CHECK-LABEL: @vsubwod_w_h(
4368673c5308Schenli // CHECK-NEXT:  entry:
4369*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4370*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4371*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsubwod.w.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4372*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4373*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4374673c5308Schenli //
vsubwod_w_h(v8i16 _1,v8i16 _2)4375673c5308Schenli v4i32 vsubwod_w_h(v8i16 _1, v8i16 _2) { return __lsx_vsubwod_w_h(_1, _2); }
4376673c5308Schenli // CHECK-LABEL: @vsubwod_h_b(
4377673c5308Schenli // CHECK-NEXT:  entry:
4378*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4379*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4380*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsubwod.h.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4381*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4382*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4383673c5308Schenli //
vsubwod_h_b(v16i8 _1,v16i8 _2)4384673c5308Schenli v8i16 vsubwod_h_b(v16i8 _1, v16i8 _2) { return __lsx_vsubwod_h_b(_1, _2); }
4385673c5308Schenli // CHECK-LABEL: @vsubwev_d_wu(
4386673c5308Schenli // CHECK-NEXT:  entry:
4387*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4388*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4389*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwev.d.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4390*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4391*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4392673c5308Schenli //
vsubwev_d_wu(v4u32 _1,v4u32 _2)4393673c5308Schenli v2i64 vsubwev_d_wu(v4u32 _1, v4u32 _2) { return __lsx_vsubwev_d_wu(_1, _2); }
4394673c5308Schenli // CHECK-LABEL: @vsubwev_w_hu(
4395673c5308Schenli // CHECK-NEXT:  entry:
4396*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4397*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4398*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsubwev.w.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4399*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4400*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4401673c5308Schenli //
vsubwev_w_hu(v8u16 _1,v8u16 _2)4402673c5308Schenli v4i32 vsubwev_w_hu(v8u16 _1, v8u16 _2) { return __lsx_vsubwev_w_hu(_1, _2); }
4403673c5308Schenli // CHECK-LABEL: @vsubwev_h_bu(
4404673c5308Schenli // CHECK-NEXT:  entry:
4405*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4406*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4407*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsubwev.h.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4408*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4409*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4410673c5308Schenli //
vsubwev_h_bu(v16u8 _1,v16u8 _2)4411673c5308Schenli v8i16 vsubwev_h_bu(v16u8 _1, v16u8 _2) { return __lsx_vsubwev_h_bu(_1, _2); }
4412673c5308Schenli // CHECK-LABEL: @vsubwod_d_wu(
4413673c5308Schenli // CHECK-NEXT:  entry:
4414*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4415*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4416*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwod.d.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4417*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4418*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4419673c5308Schenli //
vsubwod_d_wu(v4u32 _1,v4u32 _2)4420673c5308Schenli v2i64 vsubwod_d_wu(v4u32 _1, v4u32 _2) { return __lsx_vsubwod_d_wu(_1, _2); }
4421673c5308Schenli // CHECK-LABEL: @vsubwod_w_hu(
4422673c5308Schenli // CHECK-NEXT:  entry:
4423*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4424*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4425*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsubwod.w.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4426*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4427*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4428673c5308Schenli //
vsubwod_w_hu(v8u16 _1,v8u16 _2)4429673c5308Schenli v4i32 vsubwod_w_hu(v8u16 _1, v8u16 _2) { return __lsx_vsubwod_w_hu(_1, _2); }
4430673c5308Schenli // CHECK-LABEL: @vsubwod_h_bu(
4431673c5308Schenli // CHECK-NEXT:  entry:
4432*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4433*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4434*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsubwod.h.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4435*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4436*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4437673c5308Schenli //
vsubwod_h_bu(v16u8 _1,v16u8 _2)4438673c5308Schenli v8i16 vsubwod_h_bu(v16u8 _1, v16u8 _2) { return __lsx_vsubwod_h_bu(_1, _2); }
4439673c5308Schenli // CHECK-LABEL: @vaddwev_q_d(
4440673c5308Schenli // CHECK-NEXT:  entry:
4441*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4442*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4443*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwev.q.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4444*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4445*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4446673c5308Schenli //
vaddwev_q_d(v2i64 _1,v2i64 _2)4447673c5308Schenli v2i64 vaddwev_q_d(v2i64 _1, v2i64 _2) { return __lsx_vaddwev_q_d(_1, _2); }
4448673c5308Schenli // CHECK-LABEL: @vaddwod_q_d(
4449673c5308Schenli // CHECK-NEXT:  entry:
4450*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4451*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4452*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwod.q.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4453*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4454*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4455673c5308Schenli //
vaddwod_q_d(v2i64 _1,v2i64 _2)4456673c5308Schenli v2i64 vaddwod_q_d(v2i64 _1, v2i64 _2) { return __lsx_vaddwod_q_d(_1, _2); }
4457673c5308Schenli // CHECK-LABEL: @vaddwev_q_du(
4458673c5308Schenli // CHECK-NEXT:  entry:
4459*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4460*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4461*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwev.q.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4462*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4463*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4464673c5308Schenli //
vaddwev_q_du(v2u64 _1,v2u64 _2)4465673c5308Schenli v2i64 vaddwev_q_du(v2u64 _1, v2u64 _2) { return __lsx_vaddwev_q_du(_1, _2); }
4466673c5308Schenli // CHECK-LABEL: @vaddwod_q_du(
4467673c5308Schenli // CHECK-NEXT:  entry:
4468*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4469*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4470*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwod.q.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4471*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4472*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4473673c5308Schenli //
vaddwod_q_du(v2u64 _1,v2u64 _2)4474673c5308Schenli v2i64 vaddwod_q_du(v2u64 _1, v2u64 _2) { return __lsx_vaddwod_q_du(_1, _2); }
4475673c5308Schenli // CHECK-LABEL: @vsubwev_q_d(
4476673c5308Schenli // CHECK-NEXT:  entry:
4477*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4478*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4479*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwev.q.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4480*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4481*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4482673c5308Schenli //
vsubwev_q_d(v2i64 _1,v2i64 _2)4483673c5308Schenli v2i64 vsubwev_q_d(v2i64 _1, v2i64 _2) { return __lsx_vsubwev_q_d(_1, _2); }
4484673c5308Schenli // CHECK-LABEL: @vsubwod_q_d(
4485673c5308Schenli // CHECK-NEXT:  entry:
4486*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4487*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4488*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwod.q.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4489*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4490*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4491673c5308Schenli //
vsubwod_q_d(v2i64 _1,v2i64 _2)4492673c5308Schenli v2i64 vsubwod_q_d(v2i64 _1, v2i64 _2) { return __lsx_vsubwod_q_d(_1, _2); }
4493673c5308Schenli // CHECK-LABEL: @vsubwev_q_du(
4494673c5308Schenli // CHECK-NEXT:  entry:
4495*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4496*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4497*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwev.q.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4498*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4499*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4500673c5308Schenli //
vsubwev_q_du(v2u64 _1,v2u64 _2)4501673c5308Schenli v2i64 vsubwev_q_du(v2u64 _1, v2u64 _2) { return __lsx_vsubwev_q_du(_1, _2); }
4502673c5308Schenli // CHECK-LABEL: @vsubwod_q_du(
4503673c5308Schenli // CHECK-NEXT:  entry:
4504*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4505*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4506*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwod.q.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4507*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4508*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4509673c5308Schenli //
vsubwod_q_du(v2u64 _1,v2u64 _2)4510673c5308Schenli v2i64 vsubwod_q_du(v2u64 _1, v2u64 _2) { return __lsx_vsubwod_q_du(_1, _2); }
4511673c5308Schenli // CHECK-LABEL: @vaddwev_q_du_d(
4512673c5308Schenli // CHECK-NEXT:  entry:
4513*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4514*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4515*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwev.q.du.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4516*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4517*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4518673c5308Schenli //
vaddwev_q_du_d(v2u64 _1,v2i64 _2)4519673c5308Schenli v2i64 vaddwev_q_du_d(v2u64 _1, v2i64 _2) {
4520673c5308Schenli   return __lsx_vaddwev_q_du_d(_1, _2);
4521673c5308Schenli }
4522673c5308Schenli // CHECK-LABEL: @vaddwod_q_du_d(
4523673c5308Schenli // CHECK-NEXT:  entry:
4524*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4525*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4526*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwod.q.du.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4527*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4528*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4529673c5308Schenli //
vaddwod_q_du_d(v2u64 _1,v2i64 _2)4530673c5308Schenli v2i64 vaddwod_q_du_d(v2u64 _1, v2i64 _2) {
4531673c5308Schenli   return __lsx_vaddwod_q_du_d(_1, _2);
4532673c5308Schenli }
4533673c5308Schenli // CHECK-LABEL: @vmulwev_d_w(
4534673c5308Schenli // CHECK-NEXT:  entry:
4535*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4536*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4537*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwev.d.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4538*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4539*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4540673c5308Schenli //
vmulwev_d_w(v4i32 _1,v4i32 _2)4541673c5308Schenli v2i64 vmulwev_d_w(v4i32 _1, v4i32 _2) { return __lsx_vmulwev_d_w(_1, _2); }
4542673c5308Schenli // CHECK-LABEL: @vmulwev_w_h(
4543673c5308Schenli // CHECK-NEXT:  entry:
4544*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4545*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4546*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmulwev.w.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4547*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4548*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4549673c5308Schenli //
vmulwev_w_h(v8i16 _1,v8i16 _2)4550673c5308Schenli v4i32 vmulwev_w_h(v8i16 _1, v8i16 _2) { return __lsx_vmulwev_w_h(_1, _2); }
4551673c5308Schenli // CHECK-LABEL: @vmulwev_h_b(
4552673c5308Schenli // CHECK-NEXT:  entry:
4553*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4554*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4555*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmulwev.h.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4556*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4557*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4558673c5308Schenli //
vmulwev_h_b(v16i8 _1,v16i8 _2)4559673c5308Schenli v8i16 vmulwev_h_b(v16i8 _1, v16i8 _2) { return __lsx_vmulwev_h_b(_1, _2); }
4560673c5308Schenli // CHECK-LABEL: @vmulwod_d_w(
4561673c5308Schenli // CHECK-NEXT:  entry:
4562*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4563*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4564*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwod.d.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4565*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4566*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4567673c5308Schenli //
vmulwod_d_w(v4i32 _1,v4i32 _2)4568673c5308Schenli v2i64 vmulwod_d_w(v4i32 _1, v4i32 _2) { return __lsx_vmulwod_d_w(_1, _2); }
4569673c5308Schenli // CHECK-LABEL: @vmulwod_w_h(
4570673c5308Schenli // CHECK-NEXT:  entry:
4571*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4572*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4573*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmulwod.w.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4574*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4575*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4576673c5308Schenli //
vmulwod_w_h(v8i16 _1,v8i16 _2)4577673c5308Schenli v4i32 vmulwod_w_h(v8i16 _1, v8i16 _2) { return __lsx_vmulwod_w_h(_1, _2); }
4578673c5308Schenli // CHECK-LABEL: @vmulwod_h_b(
4579673c5308Schenli // CHECK-NEXT:  entry:
4580*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4581*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4582*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmulwod.h.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4583*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4584*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4585673c5308Schenli //
vmulwod_h_b(v16i8 _1,v16i8 _2)4586673c5308Schenli v8i16 vmulwod_h_b(v16i8 _1, v16i8 _2) { return __lsx_vmulwod_h_b(_1, _2); }
4587673c5308Schenli // CHECK-LABEL: @vmulwev_d_wu(
4588673c5308Schenli // CHECK-NEXT:  entry:
4589*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4590*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4591*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwev.d.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4592*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4593*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4594673c5308Schenli //
vmulwev_d_wu(v4u32 _1,v4u32 _2)4595673c5308Schenli v2i64 vmulwev_d_wu(v4u32 _1, v4u32 _2) { return __lsx_vmulwev_d_wu(_1, _2); }
4596673c5308Schenli // CHECK-LABEL: @vmulwev_w_hu(
4597673c5308Schenli // CHECK-NEXT:  entry:
4598*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4599*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4600*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmulwev.w.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4601*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4602*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4603673c5308Schenli //
vmulwev_w_hu(v8u16 _1,v8u16 _2)4604673c5308Schenli v4i32 vmulwev_w_hu(v8u16 _1, v8u16 _2) { return __lsx_vmulwev_w_hu(_1, _2); }
4605673c5308Schenli // CHECK-LABEL: @vmulwev_h_bu(
4606673c5308Schenli // CHECK-NEXT:  entry:
4607*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4608*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4609*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmulwev.h.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4610*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4611*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4612673c5308Schenli //
vmulwev_h_bu(v16u8 _1,v16u8 _2)4613673c5308Schenli v8i16 vmulwev_h_bu(v16u8 _1, v16u8 _2) { return __lsx_vmulwev_h_bu(_1, _2); }
4614673c5308Schenli // CHECK-LABEL: @vmulwod_d_wu(
4615673c5308Schenli // CHECK-NEXT:  entry:
4616*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4617*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4618*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwod.d.wu(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4619*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4620*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4621673c5308Schenli //
vmulwod_d_wu(v4u32 _1,v4u32 _2)4622673c5308Schenli v2i64 vmulwod_d_wu(v4u32 _1, v4u32 _2) { return __lsx_vmulwod_d_wu(_1, _2); }
4623673c5308Schenli // CHECK-LABEL: @vmulwod_w_hu(
4624673c5308Schenli // CHECK-NEXT:  entry:
4625*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4626*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4627*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmulwod.w.hu(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4628*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4629*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4630673c5308Schenli //
vmulwod_w_hu(v8u16 _1,v8u16 _2)4631673c5308Schenli v4i32 vmulwod_w_hu(v8u16 _1, v8u16 _2) { return __lsx_vmulwod_w_hu(_1, _2); }
4632673c5308Schenli // CHECK-LABEL: @vmulwod_h_bu(
4633673c5308Schenli // CHECK-NEXT:  entry:
4634*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4635*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4636*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmulwod.h.bu(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4637*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4638*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4639673c5308Schenli //
vmulwod_h_bu(v16u8 _1,v16u8 _2)4640673c5308Schenli v8i16 vmulwod_h_bu(v16u8 _1, v16u8 _2) { return __lsx_vmulwod_h_bu(_1, _2); }
4641673c5308Schenli // CHECK-LABEL: @vmulwev_d_wu_w(
4642673c5308Schenli // CHECK-NEXT:  entry:
4643*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4644*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4645*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwev.d.wu.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4646*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4647*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4648673c5308Schenli //
vmulwev_d_wu_w(v4u32 _1,v4i32 _2)4649673c5308Schenli v2i64 vmulwev_d_wu_w(v4u32 _1, v4i32 _2) {
4650673c5308Schenli   return __lsx_vmulwev_d_wu_w(_1, _2);
4651673c5308Schenli }
4652673c5308Schenli // CHECK-LABEL: @vmulwev_w_hu_h(
4653673c5308Schenli // CHECK-NEXT:  entry:
4654*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4655*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4656*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmulwev.w.hu.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4657*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4658*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4659673c5308Schenli //
vmulwev_w_hu_h(v8u16 _1,v8i16 _2)4660673c5308Schenli v4i32 vmulwev_w_hu_h(v8u16 _1, v8i16 _2) {
4661673c5308Schenli   return __lsx_vmulwev_w_hu_h(_1, _2);
4662673c5308Schenli }
4663673c5308Schenli // CHECK-LABEL: @vmulwev_h_bu_b(
4664673c5308Schenli // CHECK-NEXT:  entry:
4665*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4666*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4667*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmulwev.h.bu.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4668*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4669*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4670673c5308Schenli //
vmulwev_h_bu_b(v16u8 _1,v16i8 _2)4671673c5308Schenli v8i16 vmulwev_h_bu_b(v16u8 _1, v16i8 _2) {
4672673c5308Schenli   return __lsx_vmulwev_h_bu_b(_1, _2);
4673673c5308Schenli }
4674673c5308Schenli // CHECK-LABEL: @vmulwod_d_wu_w(
4675673c5308Schenli // CHECK-NEXT:  entry:
4676*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4677*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4678*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwod.d.wu.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
4679*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4680*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4681673c5308Schenli //
vmulwod_d_wu_w(v4u32 _1,v4i32 _2)4682673c5308Schenli v2i64 vmulwod_d_wu_w(v4u32 _1, v4i32 _2) {
4683673c5308Schenli   return __lsx_vmulwod_d_wu_w(_1, _2);
4684673c5308Schenli }
4685673c5308Schenli // CHECK-LABEL: @vmulwod_w_hu_h(
4686673c5308Schenli // CHECK-NEXT:  entry:
4687*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4688*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4689*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmulwod.w.hu.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
4690*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
4691*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4692673c5308Schenli //
vmulwod_w_hu_h(v8u16 _1,v8i16 _2)4693673c5308Schenli v4i32 vmulwod_w_hu_h(v8u16 _1, v8i16 _2) {
4694673c5308Schenli   return __lsx_vmulwod_w_hu_h(_1, _2);
4695673c5308Schenli }
4696673c5308Schenli // CHECK-LABEL: @vmulwod_h_bu_b(
4697673c5308Schenli // CHECK-NEXT:  entry:
4698*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
4699*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4700*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmulwod.h.bu.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
4701*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
4702*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4703673c5308Schenli //
vmulwod_h_bu_b(v16u8 _1,v16i8 _2)4704673c5308Schenli v8i16 vmulwod_h_bu_b(v16u8 _1, v16i8 _2) {
4705673c5308Schenli   return __lsx_vmulwod_h_bu_b(_1, _2);
4706673c5308Schenli }
4707673c5308Schenli // CHECK-LABEL: @vmulwev_q_d(
4708673c5308Schenli // CHECK-NEXT:  entry:
4709*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4710*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4711*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwev.q.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4712*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4713*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4714673c5308Schenli //
vmulwev_q_d(v2i64 _1,v2i64 _2)4715673c5308Schenli v2i64 vmulwev_q_d(v2i64 _1, v2i64 _2) { return __lsx_vmulwev_q_d(_1, _2); }
4716673c5308Schenli // CHECK-LABEL: @vmulwod_q_d(
4717673c5308Schenli // CHECK-NEXT:  entry:
4718*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4719*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4720*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwod.q.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4721*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4722*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4723673c5308Schenli //
vmulwod_q_d(v2i64 _1,v2i64 _2)4724673c5308Schenli v2i64 vmulwod_q_d(v2i64 _1, v2i64 _2) { return __lsx_vmulwod_q_d(_1, _2); }
4725673c5308Schenli // CHECK-LABEL: @vmulwev_q_du(
4726673c5308Schenli // CHECK-NEXT:  entry:
4727*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4728*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4729*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwev.q.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4730*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4731*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4732673c5308Schenli //
vmulwev_q_du(v2u64 _1,v2u64 _2)4733673c5308Schenli v2i64 vmulwev_q_du(v2u64 _1, v2u64 _2) { return __lsx_vmulwev_q_du(_1, _2); }
4734673c5308Schenli // CHECK-LABEL: @vmulwod_q_du(
4735673c5308Schenli // CHECK-NEXT:  entry:
4736*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4737*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4738*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwod.q.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4739*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4740*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4741673c5308Schenli //
vmulwod_q_du(v2u64 _1,v2u64 _2)4742673c5308Schenli v2i64 vmulwod_q_du(v2u64 _1, v2u64 _2) { return __lsx_vmulwod_q_du(_1, _2); }
4743673c5308Schenli // CHECK-LABEL: @vmulwev_q_du_d(
4744673c5308Schenli // CHECK-NEXT:  entry:
4745*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4746*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4747*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwev.q.du.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4748*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4749*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4750673c5308Schenli //
vmulwev_q_du_d(v2u64 _1,v2i64 _2)4751673c5308Schenli v2i64 vmulwev_q_du_d(v2u64 _1, v2i64 _2) {
4752673c5308Schenli   return __lsx_vmulwev_q_du_d(_1, _2);
4753673c5308Schenli }
4754673c5308Schenli // CHECK-LABEL: @vmulwod_q_du_d(
4755673c5308Schenli // CHECK-NEXT:  entry:
4756*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4757*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4758*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwod.q.du.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4759*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4760*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4761673c5308Schenli //
vmulwod_q_du_d(v2u64 _1,v2i64 _2)4762673c5308Schenli v2i64 vmulwod_q_du_d(v2u64 _1, v2i64 _2) {
4763673c5308Schenli   return __lsx_vmulwod_q_du_d(_1, _2);
4764673c5308Schenli }
4765673c5308Schenli // CHECK-LABEL: @vhaddw_q_d(
4766673c5308Schenli // CHECK-NEXT:  entry:
4767*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4768*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4769*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhaddw.q.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4770*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4771*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4772673c5308Schenli //
vhaddw_q_d(v2i64 _1,v2i64 _2)4773673c5308Schenli v2i64 vhaddw_q_d(v2i64 _1, v2i64 _2) { return __lsx_vhaddw_q_d(_1, _2); }
4774673c5308Schenli // CHECK-LABEL: @vhaddw_qu_du(
4775673c5308Schenli // CHECK-NEXT:  entry:
4776*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4777*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4778*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhaddw.qu.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4779*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4780*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4781673c5308Schenli //
vhaddw_qu_du(v2u64 _1,v2u64 _2)4782673c5308Schenli v2u64 vhaddw_qu_du(v2u64 _1, v2u64 _2) { return __lsx_vhaddw_qu_du(_1, _2); }
4783673c5308Schenli // CHECK-LABEL: @vhsubw_q_d(
4784673c5308Schenli // CHECK-NEXT:  entry:
4785*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4786*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4787*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhsubw.q.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4788*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4789*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4790673c5308Schenli //
vhsubw_q_d(v2i64 _1,v2i64 _2)4791673c5308Schenli v2i64 vhsubw_q_d(v2i64 _1, v2i64 _2) { return __lsx_vhsubw_q_d(_1, _2); }
4792673c5308Schenli // CHECK-LABEL: @vhsubw_qu_du(
4793673c5308Schenli // CHECK-NEXT:  entry:
4794*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4795*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
4796*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhsubw.qu.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
4797*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
4798*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
4799673c5308Schenli //
vhsubw_qu_du(v2u64 _1,v2u64 _2)4800673c5308Schenli v2u64 vhsubw_qu_du(v2u64 _1, v2u64 _2) { return __lsx_vhsubw_qu_du(_1, _2); }
4801673c5308Schenli // CHECK-LABEL: @vmaddwev_d_w(
4802673c5308Schenli // CHECK-NEXT:  entry:
4803*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4804*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4805*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x i32>
4806*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.w(<2 x i64> [[TMP0]], <4 x i32> [[TMP1]], <4 x i32> [[TMP2]])
4807*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
4808*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4809673c5308Schenli //
vmaddwev_d_w(v2i64 _1,v4i32 _2,v4i32 _3)4810673c5308Schenli v2i64 vmaddwev_d_w(v2i64 _1, v4i32 _2, v4i32 _3) {
4811673c5308Schenli   return __lsx_vmaddwev_d_w(_1, _2, _3);
4812673c5308Schenli }
4813673c5308Schenli // CHECK-LABEL: @vmaddwev_w_h(
4814673c5308Schenli // CHECK-NEXT:  entry:
4815*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4816*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4817*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <8 x i16>
4818*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.h(<4 x i32> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
4819*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
4820*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4821673c5308Schenli //
vmaddwev_w_h(v4i32 _1,v8i16 _2,v8i16 _3)4822673c5308Schenli v4i32 vmaddwev_w_h(v4i32 _1, v8i16 _2, v8i16 _3) {
4823673c5308Schenli   return __lsx_vmaddwev_w_h(_1, _2, _3);
4824673c5308Schenli }
4825673c5308Schenli // CHECK-LABEL: @vmaddwev_h_b(
4826673c5308Schenli // CHECK-NEXT:  entry:
4827*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4828*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4829*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <16 x i8>
4830*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.b(<8 x i16> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
4831*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
4832*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4833673c5308Schenli //
vmaddwev_h_b(v8i16 _1,v16i8 _2,v16i8 _3)4834673c5308Schenli v8i16 vmaddwev_h_b(v8i16 _1, v16i8 _2, v16i8 _3) {
4835673c5308Schenli   return __lsx_vmaddwev_h_b(_1, _2, _3);
4836673c5308Schenli }
4837673c5308Schenli // CHECK-LABEL: @vmaddwev_d_wu(
4838673c5308Schenli // CHECK-NEXT:  entry:
4839*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4840*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4841*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x i32>
4842*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.wu(<2 x i64> [[TMP0]], <4 x i32> [[TMP1]], <4 x i32> [[TMP2]])
4843*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
4844*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4845673c5308Schenli //
vmaddwev_d_wu(v2u64 _1,v4u32 _2,v4u32 _3)4846673c5308Schenli v2u64 vmaddwev_d_wu(v2u64 _1, v4u32 _2, v4u32 _3) {
4847673c5308Schenli   return __lsx_vmaddwev_d_wu(_1, _2, _3);
4848673c5308Schenli }
4849673c5308Schenli // CHECK-LABEL: @vmaddwev_w_hu(
4850673c5308Schenli // CHECK-NEXT:  entry:
4851*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4852*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4853*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <8 x i16>
4854*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.hu(<4 x i32> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
4855*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
4856*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4857673c5308Schenli //
vmaddwev_w_hu(v4u32 _1,v8u16 _2,v8u16 _3)4858673c5308Schenli v4u32 vmaddwev_w_hu(v4u32 _1, v8u16 _2, v8u16 _3) {
4859673c5308Schenli   return __lsx_vmaddwev_w_hu(_1, _2, _3);
4860673c5308Schenli }
4861673c5308Schenli // CHECK-LABEL: @vmaddwev_h_bu(
4862673c5308Schenli // CHECK-NEXT:  entry:
4863*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4864*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4865*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <16 x i8>
4866*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.bu(<8 x i16> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
4867*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
4868*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4869673c5308Schenli //
vmaddwev_h_bu(v8u16 _1,v16u8 _2,v16u8 _3)4870673c5308Schenli v8u16 vmaddwev_h_bu(v8u16 _1, v16u8 _2, v16u8 _3) {
4871673c5308Schenli   return __lsx_vmaddwev_h_bu(_1, _2, _3);
4872673c5308Schenli }
4873673c5308Schenli // CHECK-LABEL: @vmaddwod_d_w(
4874673c5308Schenli // CHECK-NEXT:  entry:
4875*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4876*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4877*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x i32>
4878*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.w(<2 x i64> [[TMP0]], <4 x i32> [[TMP1]], <4 x i32> [[TMP2]])
4879*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
4880*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4881673c5308Schenli //
vmaddwod_d_w(v2i64 _1,v4i32 _2,v4i32 _3)4882673c5308Schenli v2i64 vmaddwod_d_w(v2i64 _1, v4i32 _2, v4i32 _3) {
4883673c5308Schenli   return __lsx_vmaddwod_d_w(_1, _2, _3);
4884673c5308Schenli }
4885673c5308Schenli // CHECK-LABEL: @vmaddwod_w_h(
4886673c5308Schenli // CHECK-NEXT:  entry:
4887*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4888*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4889*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <8 x i16>
4890*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.h(<4 x i32> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
4891*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
4892*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4893673c5308Schenli //
vmaddwod_w_h(v4i32 _1,v8i16 _2,v8i16 _3)4894673c5308Schenli v4i32 vmaddwod_w_h(v4i32 _1, v8i16 _2, v8i16 _3) {
4895673c5308Schenli   return __lsx_vmaddwod_w_h(_1, _2, _3);
4896673c5308Schenli }
4897673c5308Schenli // CHECK-LABEL: @vmaddwod_h_b(
4898673c5308Schenli // CHECK-NEXT:  entry:
4899*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4900*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4901*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <16 x i8>
4902*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.b(<8 x i16> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
4903*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
4904*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4905673c5308Schenli //
vmaddwod_h_b(v8i16 _1,v16i8 _2,v16i8 _3)4906673c5308Schenli v8i16 vmaddwod_h_b(v8i16 _1, v16i8 _2, v16i8 _3) {
4907673c5308Schenli   return __lsx_vmaddwod_h_b(_1, _2, _3);
4908673c5308Schenli }
4909673c5308Schenli // CHECK-LABEL: @vmaddwod_d_wu(
4910673c5308Schenli // CHECK-NEXT:  entry:
4911*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4912*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4913*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x i32>
4914*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.wu(<2 x i64> [[TMP0]], <4 x i32> [[TMP1]], <4 x i32> [[TMP2]])
4915*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
4916*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4917673c5308Schenli //
vmaddwod_d_wu(v2u64 _1,v4u32 _2,v4u32 _3)4918673c5308Schenli v2u64 vmaddwod_d_wu(v2u64 _1, v4u32 _2, v4u32 _3) {
4919673c5308Schenli   return __lsx_vmaddwod_d_wu(_1, _2, _3);
4920673c5308Schenli }
4921673c5308Schenli // CHECK-LABEL: @vmaddwod_w_hu(
4922673c5308Schenli // CHECK-NEXT:  entry:
4923*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4924*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4925*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <8 x i16>
4926*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.hu(<4 x i32> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
4927*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
4928*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4929673c5308Schenli //
vmaddwod_w_hu(v4u32 _1,v8u16 _2,v8u16 _3)4930673c5308Schenli v4u32 vmaddwod_w_hu(v4u32 _1, v8u16 _2, v8u16 _3) {
4931673c5308Schenli   return __lsx_vmaddwod_w_hu(_1, _2, _3);
4932673c5308Schenli }
4933673c5308Schenli // CHECK-LABEL: @vmaddwod_h_bu(
4934673c5308Schenli // CHECK-NEXT:  entry:
4935*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4936*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4937*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <16 x i8>
4938*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.bu(<8 x i16> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
4939*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
4940*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4941673c5308Schenli //
vmaddwod_h_bu(v8u16 _1,v16u8 _2,v16u8 _3)4942673c5308Schenli v8u16 vmaddwod_h_bu(v8u16 _1, v16u8 _2, v16u8 _3) {
4943673c5308Schenli   return __lsx_vmaddwod_h_bu(_1, _2, _3);
4944673c5308Schenli }
4945673c5308Schenli // CHECK-LABEL: @vmaddwev_d_wu_w(
4946673c5308Schenli // CHECK-NEXT:  entry:
4947*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4948*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4949*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x i32>
4950*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.wu.w(<2 x i64> [[TMP0]], <4 x i32> [[TMP1]], <4 x i32> [[TMP2]])
4951*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
4952*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4953673c5308Schenli //
vmaddwev_d_wu_w(v2i64 _1,v4u32 _2,v4i32 _3)4954673c5308Schenli v2i64 vmaddwev_d_wu_w(v2i64 _1, v4u32 _2, v4i32 _3) {
4955673c5308Schenli   return __lsx_vmaddwev_d_wu_w(_1, _2, _3);
4956673c5308Schenli }
4957673c5308Schenli // CHECK-LABEL: @vmaddwev_w_hu_h(
4958673c5308Schenli // CHECK-NEXT:  entry:
4959*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4960*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4961*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <8 x i16>
4962*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.hu.h(<4 x i32> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
4963*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
4964*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4965673c5308Schenli //
vmaddwev_w_hu_h(v4i32 _1,v8u16 _2,v8i16 _3)4966673c5308Schenli v4i32 vmaddwev_w_hu_h(v4i32 _1, v8u16 _2, v8i16 _3) {
4967673c5308Schenli   return __lsx_vmaddwev_w_hu_h(_1, _2, _3);
4968673c5308Schenli }
4969673c5308Schenli // CHECK-LABEL: @vmaddwev_h_bu_b(
4970673c5308Schenli // CHECK-NEXT:  entry:
4971*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
4972*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
4973*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <16 x i8>
4974*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.bu.b(<8 x i16> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
4975*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
4976*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4977673c5308Schenli //
vmaddwev_h_bu_b(v8i16 _1,v16u8 _2,v16i8 _3)4978673c5308Schenli v8i16 vmaddwev_h_bu_b(v8i16 _1, v16u8 _2, v16i8 _3) {
4979673c5308Schenli   return __lsx_vmaddwev_h_bu_b(_1, _2, _3);
4980673c5308Schenli }
4981673c5308Schenli // CHECK-LABEL: @vmaddwod_d_wu_w(
4982673c5308Schenli // CHECK-NEXT:  entry:
4983*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
4984*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
4985*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <4 x i32>
4986*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.wu.w(<2 x i64> [[TMP0]], <4 x i32> [[TMP1]], <4 x i32> [[TMP2]])
4987*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
4988*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
4989673c5308Schenli //
vmaddwod_d_wu_w(v2i64 _1,v4u32 _2,v4i32 _3)4990673c5308Schenli v2i64 vmaddwod_d_wu_w(v2i64 _1, v4u32 _2, v4i32 _3) {
4991673c5308Schenli   return __lsx_vmaddwod_d_wu_w(_1, _2, _3);
4992673c5308Schenli }
4993673c5308Schenli // CHECK-LABEL: @vmaddwod_w_hu_h(
4994673c5308Schenli // CHECK-NEXT:  entry:
4995*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
4996*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
4997*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <8 x i16>
4998*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.hu.h(<4 x i32> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]])
4999*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
5000*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
5001673c5308Schenli //
vmaddwod_w_hu_h(v4i32 _1,v8u16 _2,v8i16 _3)5002673c5308Schenli v4i32 vmaddwod_w_hu_h(v4i32 _1, v8u16 _2, v8i16 _3) {
5003673c5308Schenli   return __lsx_vmaddwod_w_hu_h(_1, _2, _3);
5004673c5308Schenli }
5005673c5308Schenli // CHECK-LABEL: @vmaddwod_h_bu_b(
5006673c5308Schenli // CHECK-NEXT:  entry:
5007*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5008*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5009*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <16 x i8>
5010*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.bu.b(<8 x i16> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
5011*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
5012*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
5013673c5308Schenli //
vmaddwod_h_bu_b(v8i16 _1,v16u8 _2,v16i8 _3)5014673c5308Schenli v8i16 vmaddwod_h_bu_b(v8i16 _1, v16u8 _2, v16i8 _3) {
5015673c5308Schenli   return __lsx_vmaddwod_h_bu_b(_1, _2, _3);
5016673c5308Schenli }
5017673c5308Schenli // CHECK-LABEL: @vmaddwev_q_d(
5018673c5308Schenli // CHECK-NEXT:  entry:
5019*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5020*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5021*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x i64>
5022*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i64> [[TMP2]])
5023*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
5024*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
5025673c5308Schenli //
vmaddwev_q_d(v2i64 _1,v2i64 _2,v2i64 _3)5026673c5308Schenli v2i64 vmaddwev_q_d(v2i64 _1, v2i64 _2, v2i64 _3) {
5027673c5308Schenli   return __lsx_vmaddwev_q_d(_1, _2, _3);
5028673c5308Schenli }
5029673c5308Schenli // CHECK-LABEL: @vmaddwod_q_d(
5030673c5308Schenli // CHECK-NEXT:  entry:
5031*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5032*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5033*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x i64>
5034*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i64> [[TMP2]])
5035*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
5036*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
5037673c5308Schenli //
vmaddwod_q_d(v2i64 _1,v2i64 _2,v2i64 _3)5038673c5308Schenli v2i64 vmaddwod_q_d(v2i64 _1, v2i64 _2, v2i64 _3) {
5039673c5308Schenli   return __lsx_vmaddwod_q_d(_1, _2, _3);
5040673c5308Schenli }
5041673c5308Schenli // CHECK-LABEL: @vmaddwev_q_du(
5042673c5308Schenli // CHECK-NEXT:  entry:
5043*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5044*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5045*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x i64>
5046*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i64> [[TMP2]])
5047*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
5048*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
5049673c5308Schenli //
vmaddwev_q_du(v2u64 _1,v2u64 _2,v2u64 _3)5050673c5308Schenli v2u64 vmaddwev_q_du(v2u64 _1, v2u64 _2, v2u64 _3) {
5051673c5308Schenli   return __lsx_vmaddwev_q_du(_1, _2, _3);
5052673c5308Schenli }
5053673c5308Schenli // CHECK-LABEL: @vmaddwod_q_du(
5054673c5308Schenli // CHECK-NEXT:  entry:
5055*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5056*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5057*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x i64>
5058*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.du(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i64> [[TMP2]])
5059*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
5060*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
5061673c5308Schenli //
vmaddwod_q_du(v2u64 _1,v2u64 _2,v2u64 _3)5062673c5308Schenli v2u64 vmaddwod_q_du(v2u64 _1, v2u64 _2, v2u64 _3) {
5063673c5308Schenli   return __lsx_vmaddwod_q_du(_1, _2, _3);
5064673c5308Schenli }
5065673c5308Schenli // CHECK-LABEL: @vmaddwev_q_du_d(
5066673c5308Schenli // CHECK-NEXT:  entry:
5067*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5068*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5069*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x i64>
5070*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.du.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i64> [[TMP2]])
5071*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
5072*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
5073673c5308Schenli //
vmaddwev_q_du_d(v2i64 _1,v2u64 _2,v2i64 _3)5074673c5308Schenli v2i64 vmaddwev_q_du_d(v2i64 _1, v2u64 _2, v2i64 _3) {
5075673c5308Schenli   return __lsx_vmaddwev_q_du_d(_1, _2, _3);
5076673c5308Schenli }
5077673c5308Schenli // CHECK-LABEL: @vmaddwod_q_du_d(
5078673c5308Schenli // CHECK-NEXT:  entry:
5079*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5080*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5081*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <2 x i64>
5082*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.du.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i64> [[TMP2]])
5083*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
5084*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
5085673c5308Schenli //
vmaddwod_q_du_d(v2i64 _1,v2u64 _2,v2i64 _3)5086673c5308Schenli v2i64 vmaddwod_q_du_d(v2i64 _1, v2u64 _2, v2i64 _3) {
5087673c5308Schenli   return __lsx_vmaddwod_q_du_d(_1, _2, _3);
5088673c5308Schenli }
5089673c5308Schenli // CHECK-LABEL: @vrotr_b(
5090673c5308Schenli // CHECK-NEXT:  entry:
5091*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5092*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5093*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vrotr.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
5094*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5095*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5096673c5308Schenli //
vrotr_b(v16i8 _1,v16i8 _2)5097673c5308Schenli v16i8 vrotr_b(v16i8 _1, v16i8 _2) { return __lsx_vrotr_b(_1, _2); }
5098673c5308Schenli // CHECK-LABEL: @vrotr_h(
5099673c5308Schenli // CHECK-NEXT:  entry:
5100*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5101*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5102*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vrotr.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
5103*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5104*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5105673c5308Schenli //
vrotr_h(v8i16 _1,v8i16 _2)5106673c5308Schenli v8i16 vrotr_h(v8i16 _1, v8i16 _2) { return __lsx_vrotr_h(_1, _2); }
5107673c5308Schenli // CHECK-LABEL: @vrotr_w(
5108673c5308Schenli // CHECK-NEXT:  entry:
5109*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5110*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5111*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vrotr.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
5112*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5113*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5114673c5308Schenli //
vrotr_w(v4i32 _1,v4i32 _2)5115673c5308Schenli v4i32 vrotr_w(v4i32 _1, v4i32 _2) { return __lsx_vrotr_w(_1, _2); }
5116673c5308Schenli // CHECK-LABEL: @vrotr_d(
5117673c5308Schenli // CHECK-NEXT:  entry:
5118*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5119*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5120*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vrotr.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
5121*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5122*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5123673c5308Schenli //
vrotr_d(v2i64 _1,v2i64 _2)5124673c5308Schenli v2i64 vrotr_d(v2i64 _1, v2i64 _2) { return __lsx_vrotr_d(_1, _2); }
5125673c5308Schenli // CHECK-LABEL: @vadd_q(
5126673c5308Schenli // CHECK-NEXT:  entry:
5127*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5128*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5129*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vadd.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
5130*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5131*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5132673c5308Schenli //
vadd_q(v2i64 _1,v2i64 _2)5133673c5308Schenli v2i64 vadd_q(v2i64 _1, v2i64 _2) { return __lsx_vadd_q(_1, _2); }
5134673c5308Schenli // CHECK-LABEL: @vsub_q(
5135673c5308Schenli // CHECK-NEXT:  entry:
5136*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5137*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5138*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsub.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
5139*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5140*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5141673c5308Schenli //
vsub_q(v2i64 _1,v2i64 _2)5142673c5308Schenli v2i64 vsub_q(v2i64 _1, v2i64 _2) { return __lsx_vsub_q(_1, _2); }
5143673c5308Schenli // CHECK-LABEL: @vldrepl_b(
5144673c5308Schenli // CHECK-NEXT:  entry:
5145673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vldrepl.b(ptr [[_1:%.*]], i32 1)
5146*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to i128
5147*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
5148673c5308Schenli //
vldrepl_b(void * _1)5149673c5308Schenli v16i8 vldrepl_b(void *_1) { return __lsx_vldrepl_b(_1, 1); }
5150673c5308Schenli // CHECK-LABEL: @vldrepl_h(
5151673c5308Schenli // CHECK-NEXT:  entry:
5152673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vldrepl.h(ptr [[_1:%.*]], i32 2)
5153*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i16> [[TMP0]] to i128
5154*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
5155673c5308Schenli //
vldrepl_h(void * _1)5156673c5308Schenli v8i16 vldrepl_h(void *_1) { return __lsx_vldrepl_h(_1, 2); }
5157673c5308Schenli // CHECK-LABEL: @vldrepl_w(
5158673c5308Schenli // CHECK-NEXT:  entry:
5159673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vldrepl.w(ptr [[_1:%.*]], i32 4)
5160*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to i128
5161*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
5162673c5308Schenli //
vldrepl_w(void * _1)5163673c5308Schenli v4i32 vldrepl_w(void *_1) { return __lsx_vldrepl_w(_1, 4); }
5164673c5308Schenli // CHECK-LABEL: @vldrepl_d(
5165673c5308Schenli // CHECK-NEXT:  entry:
5166673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vldrepl.d(ptr [[_1:%.*]], i32 8)
5167*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to i128
5168*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
5169673c5308Schenli //
vldrepl_d(void * _1)5170673c5308Schenli v2i64 vldrepl_d(void *_1) { return __lsx_vldrepl_d(_1, 8); }
5171673c5308Schenli // CHECK-LABEL: @vmskgez_b(
5172673c5308Schenli // CHECK-NEXT:  entry:
5173*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5174*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmskgez.b(<16 x i8> [[TMP0]])
5175*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
5176*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5177673c5308Schenli //
vmskgez_b(v16i8 _1)5178673c5308Schenli v16i8 vmskgez_b(v16i8 _1) { return __lsx_vmskgez_b(_1); }
5179673c5308Schenli // CHECK-LABEL: @vmsknz_b(
5180673c5308Schenli // CHECK-NEXT:  entry:
5181*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5182*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmsknz.b(<16 x i8> [[TMP0]])
5183*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
5184*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5185673c5308Schenli //
vmsknz_b(v16i8 _1)5186673c5308Schenli v16i8 vmsknz_b(v16i8 _1) { return __lsx_vmsknz_b(_1); }
5187673c5308Schenli // CHECK-LABEL: @vexth_h_b(
5188673c5308Schenli // CHECK-NEXT:  entry:
5189*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5190*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vexth.h.b(<16 x i8> [[TMP0]])
5191*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
5192*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5193673c5308Schenli //
vexth_h_b(v16i8 _1)5194673c5308Schenli v8i16 vexth_h_b(v16i8 _1) { return __lsx_vexth_h_b(_1); }
5195673c5308Schenli // CHECK-LABEL: @vexth_w_h(
5196673c5308Schenli // CHECK-NEXT:  entry:
5197*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5198*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vexth.w.h(<8 x i16> [[TMP0]])
5199*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
5200*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5201673c5308Schenli //
vexth_w_h(v8i16 _1)5202673c5308Schenli v4i32 vexth_w_h(v8i16 _1) { return __lsx_vexth_w_h(_1); }
5203673c5308Schenli // CHECK-LABEL: @vexth_d_w(
5204673c5308Schenli // CHECK-NEXT:  entry:
5205*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5206*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vexth.d.w(<4 x i32> [[TMP0]])
5207*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
5208*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5209673c5308Schenli //
vexth_d_w(v4i32 _1)5210673c5308Schenli v2i64 vexth_d_w(v4i32 _1) { return __lsx_vexth_d_w(_1); }
5211673c5308Schenli // CHECK-LABEL: @vexth_q_d(
5212673c5308Schenli // CHECK-NEXT:  entry:
5213*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5214*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vexth.q.d(<2 x i64> [[TMP0]])
5215*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
5216*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5217673c5308Schenli //
vexth_q_d(v2i64 _1)5218673c5308Schenli v2i64 vexth_q_d(v2i64 _1) { return __lsx_vexth_q_d(_1); }
5219673c5308Schenli // CHECK-LABEL: @vexth_hu_bu(
5220673c5308Schenli // CHECK-NEXT:  entry:
5221*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5222*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vexth.hu.bu(<16 x i8> [[TMP0]])
5223*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
5224*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5225673c5308Schenli //
vexth_hu_bu(v16u8 _1)5226673c5308Schenli v8u16 vexth_hu_bu(v16u8 _1) { return __lsx_vexth_hu_bu(_1); }
5227673c5308Schenli // CHECK-LABEL: @vexth_wu_hu(
5228673c5308Schenli // CHECK-NEXT:  entry:
5229*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5230*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vexth.wu.hu(<8 x i16> [[TMP0]])
5231*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
5232*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5233673c5308Schenli //
vexth_wu_hu(v8u16 _1)5234673c5308Schenli v4u32 vexth_wu_hu(v8u16 _1) { return __lsx_vexth_wu_hu(_1); }
5235673c5308Schenli // CHECK-LABEL: @vexth_du_wu(
5236673c5308Schenli // CHECK-NEXT:  entry:
5237*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5238*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vexth.du.wu(<4 x i32> [[TMP0]])
5239*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
5240*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5241673c5308Schenli //
vexth_du_wu(v4u32 _1)5242673c5308Schenli v2u64 vexth_du_wu(v4u32 _1) { return __lsx_vexth_du_wu(_1); }
5243673c5308Schenli // CHECK-LABEL: @vexth_qu_du(
5244673c5308Schenli // CHECK-NEXT:  entry:
5245*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5246*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vexth.qu.du(<2 x i64> [[TMP0]])
5247*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
5248*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5249673c5308Schenli //
vexth_qu_du(v2u64 _1)5250673c5308Schenli v2u64 vexth_qu_du(v2u64 _1) { return __lsx_vexth_qu_du(_1); }
5251673c5308Schenli // CHECK-LABEL: @vrotri_b(
5252673c5308Schenli // CHECK-NEXT:  entry:
5253*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5254*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vrotri.b(<16 x i8> [[TMP0]], i32 1)
5255*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
5256*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5257673c5308Schenli //
vrotri_b(v16i8 _1)5258673c5308Schenli v16i8 vrotri_b(v16i8 _1) { return __lsx_vrotri_b(_1, 1); }
5259673c5308Schenli // CHECK-LABEL: @vrotri_h(
5260673c5308Schenli // CHECK-NEXT:  entry:
5261*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5262*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vrotri.h(<8 x i16> [[TMP0]], i32 1)
5263*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
5264*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5265673c5308Schenli //
vrotri_h(v8i16 _1)5266673c5308Schenli v8i16 vrotri_h(v8i16 _1) { return __lsx_vrotri_h(_1, 1); }
5267673c5308Schenli // CHECK-LABEL: @vrotri_w(
5268673c5308Schenli // CHECK-NEXT:  entry:
5269*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5270*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vrotri.w(<4 x i32> [[TMP0]], i32 1)
5271*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
5272*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5273673c5308Schenli //
vrotri_w(v4i32 _1)5274673c5308Schenli v4i32 vrotri_w(v4i32 _1) { return __lsx_vrotri_w(_1, 1); }
5275673c5308Schenli // CHECK-LABEL: @vrotri_d(
5276673c5308Schenli // CHECK-NEXT:  entry:
5277*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5278*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vrotri.d(<2 x i64> [[TMP0]], i32 1)
5279*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
5280*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5281673c5308Schenli //
vrotri_d(v2i64 _1)5282673c5308Schenli v2i64 vrotri_d(v2i64 _1) { return __lsx_vrotri_d(_1, 1); }
5283673c5308Schenli // CHECK-LABEL: @vextl_q_d(
5284673c5308Schenli // CHECK-NEXT:  entry:
5285*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5286*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vextl.q.d(<2 x i64> [[TMP0]])
5287*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
5288*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5289673c5308Schenli //
vextl_q_d(v2i64 _1)5290673c5308Schenli v2i64 vextl_q_d(v2i64 _1) { return __lsx_vextl_q_d(_1); }
5291673c5308Schenli // CHECK-LABEL: @vsrlni_b_h(
5292673c5308Schenli // CHECK-NEXT:  entry:
5293*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5294*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5295*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrlni.b.h(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
5296*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5297*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5298673c5308Schenli //
vsrlni_b_h(v16i8 _1,v16i8 _2)5299673c5308Schenli v16i8 vsrlni_b_h(v16i8 _1, v16i8 _2) { return __lsx_vsrlni_b_h(_1, _2, 1); }
5300673c5308Schenli // CHECK-LABEL: @vsrlni_h_w(
5301673c5308Schenli // CHECK-NEXT:  entry:
5302*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5303*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5304*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrlni.h.w(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
5305*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5306*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5307673c5308Schenli //
vsrlni_h_w(v8i16 _1,v8i16 _2)5308673c5308Schenli v8i16 vsrlni_h_w(v8i16 _1, v8i16 _2) { return __lsx_vsrlni_h_w(_1, _2, 1); }
5309673c5308Schenli // CHECK-LABEL: @vsrlni_w_d(
5310673c5308Schenli // CHECK-NEXT:  entry:
5311*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5312*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5313*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrlni.w.d(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5314*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5315*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5316673c5308Schenli //
vsrlni_w_d(v4i32 _1,v4i32 _2)5317673c5308Schenli v4i32 vsrlni_w_d(v4i32 _1, v4i32 _2) { return __lsx_vsrlni_w_d(_1, _2, 1); }
5318673c5308Schenli // CHECK-LABEL: @vsrlni_d_q(
5319673c5308Schenli // CHECK-NEXT:  entry:
5320*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5321*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5322*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrlni.d.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
5323*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5324*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5325673c5308Schenli //
vsrlni_d_q(v2i64 _1,v2i64 _2)5326673c5308Schenli v2i64 vsrlni_d_q(v2i64 _1, v2i64 _2) { return __lsx_vsrlni_d_q(_1, _2, 1); }
5327673c5308Schenli // CHECK-LABEL: @vsrlrni_b_h(
5328673c5308Schenli // CHECK-NEXT:  entry:
5329*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5330*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5331*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrlrni.b.h(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
5332*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5333*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5334673c5308Schenli //
vsrlrni_b_h(v16i8 _1,v16i8 _2)5335673c5308Schenli v16i8 vsrlrni_b_h(v16i8 _1, v16i8 _2) { return __lsx_vsrlrni_b_h(_1, _2, 1); }
5336673c5308Schenli // CHECK-LABEL: @vsrlrni_h_w(
5337673c5308Schenli // CHECK-NEXT:  entry:
5338*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5339*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5340*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrlrni.h.w(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
5341*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5342*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5343673c5308Schenli //
vsrlrni_h_w(v8i16 _1,v8i16 _2)5344673c5308Schenli v8i16 vsrlrni_h_w(v8i16 _1, v8i16 _2) { return __lsx_vsrlrni_h_w(_1, _2, 1); }
5345673c5308Schenli // CHECK-LABEL: @vsrlrni_w_d(
5346673c5308Schenli // CHECK-NEXT:  entry:
5347*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5348*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5349*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrlrni.w.d(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5350*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5351*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5352673c5308Schenli //
vsrlrni_w_d(v4i32 _1,v4i32 _2)5353673c5308Schenli v4i32 vsrlrni_w_d(v4i32 _1, v4i32 _2) { return __lsx_vsrlrni_w_d(_1, _2, 1); }
5354673c5308Schenli // CHECK-LABEL: @vsrlrni_d_q(
5355673c5308Schenli // CHECK-NEXT:  entry:
5356*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5357*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5358*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrlrni.d.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
5359*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5360*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5361673c5308Schenli //
vsrlrni_d_q(v2i64 _1,v2i64 _2)5362673c5308Schenli v2i64 vsrlrni_d_q(v2i64 _1, v2i64 _2) { return __lsx_vsrlrni_d_q(_1, _2, 1); }
5363673c5308Schenli // CHECK-LABEL: @vssrlni_b_h(
5364673c5308Schenli // CHECK-NEXT:  entry:
5365*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5366*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5367*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrlni.b.h(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
5368*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5369*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5370673c5308Schenli //
vssrlni_b_h(v16i8 _1,v16i8 _2)5371673c5308Schenli v16i8 vssrlni_b_h(v16i8 _1, v16i8 _2) { return __lsx_vssrlni_b_h(_1, _2, 1); }
5372673c5308Schenli // CHECK-LABEL: @vssrlni_h_w(
5373673c5308Schenli // CHECK-NEXT:  entry:
5374*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5375*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5376*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrlni.h.w(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
5377*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5378*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5379673c5308Schenli //
vssrlni_h_w(v8i16 _1,v8i16 _2)5380673c5308Schenli v8i16 vssrlni_h_w(v8i16 _1, v8i16 _2) { return __lsx_vssrlni_h_w(_1, _2, 1); }
5381673c5308Schenli // CHECK-LABEL: @vssrlni_w_d(
5382673c5308Schenli // CHECK-NEXT:  entry:
5383*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5384*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5385*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrlni.w.d(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5386*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5387*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5388673c5308Schenli //
vssrlni_w_d(v4i32 _1,v4i32 _2)5389673c5308Schenli v4i32 vssrlni_w_d(v4i32 _1, v4i32 _2) { return __lsx_vssrlni_w_d(_1, _2, 1); }
5390673c5308Schenli // CHECK-LABEL: @vssrlni_d_q(
5391673c5308Schenli // CHECK-NEXT:  entry:
5392*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5393*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5394*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrlni.d.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
5395*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5396*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5397673c5308Schenli //
vssrlni_d_q(v2i64 _1,v2i64 _2)5398673c5308Schenli v2i64 vssrlni_d_q(v2i64 _1, v2i64 _2) { return __lsx_vssrlni_d_q(_1, _2, 1); }
5399673c5308Schenli // CHECK-LABEL: @vssrlni_bu_h(
5400673c5308Schenli // CHECK-NEXT:  entry:
5401*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5402*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5403*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrlni.bu.h(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
5404*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5405*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5406673c5308Schenli //
vssrlni_bu_h(v16u8 _1,v16i8 _2)5407673c5308Schenli v16u8 vssrlni_bu_h(v16u8 _1, v16i8 _2) { return __lsx_vssrlni_bu_h(_1, _2, 1); }
5408673c5308Schenli // CHECK-LABEL: @vssrlni_hu_w(
5409673c5308Schenli // CHECK-NEXT:  entry:
5410*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5411*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5412*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrlni.hu.w(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
5413*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5414*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5415673c5308Schenli //
vssrlni_hu_w(v8u16 _1,v8i16 _2)5416673c5308Schenli v8u16 vssrlni_hu_w(v8u16 _1, v8i16 _2) { return __lsx_vssrlni_hu_w(_1, _2, 1); }
5417673c5308Schenli // CHECK-LABEL: @vssrlni_wu_d(
5418673c5308Schenli // CHECK-NEXT:  entry:
5419*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5420*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5421*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrlni.wu.d(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5422*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5423*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5424673c5308Schenli //
vssrlni_wu_d(v4u32 _1,v4i32 _2)5425673c5308Schenli v4u32 vssrlni_wu_d(v4u32 _1, v4i32 _2) { return __lsx_vssrlni_wu_d(_1, _2, 1); }
5426673c5308Schenli // CHECK-LABEL: @vssrlni_du_q(
5427673c5308Schenli // CHECK-NEXT:  entry:
5428*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5429*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5430*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrlni.du.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
5431*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5432*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5433673c5308Schenli //
vssrlni_du_q(v2u64 _1,v2i64 _2)5434673c5308Schenli v2u64 vssrlni_du_q(v2u64 _1, v2i64 _2) { return __lsx_vssrlni_du_q(_1, _2, 1); }
5435673c5308Schenli // CHECK-LABEL: @vssrlrni_b_h(
5436673c5308Schenli // CHECK-NEXT:  entry:
5437*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5438*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5439*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrlrni.b.h(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
5440*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5441*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5442673c5308Schenli //
vssrlrni_b_h(v16i8 _1,v16i8 _2)5443673c5308Schenli v16i8 vssrlrni_b_h(v16i8 _1, v16i8 _2) { return __lsx_vssrlrni_b_h(_1, _2, 1); }
5444673c5308Schenli // CHECK-LABEL: @vssrlrni_h_w(
5445673c5308Schenli // CHECK-NEXT:  entry:
5446*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5447*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5448*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrlrni.h.w(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
5449*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5450*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5451673c5308Schenli //
vssrlrni_h_w(v8i16 _1,v8i16 _2)5452673c5308Schenli v8i16 vssrlrni_h_w(v8i16 _1, v8i16 _2) { return __lsx_vssrlrni_h_w(_1, _2, 1); }
5453673c5308Schenli // CHECK-LABEL: @vssrlrni_w_d(
5454673c5308Schenli // CHECK-NEXT:  entry:
5455*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5456*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5457*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrlrni.w.d(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5458*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5459*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5460673c5308Schenli //
vssrlrni_w_d(v4i32 _1,v4i32 _2)5461673c5308Schenli v4i32 vssrlrni_w_d(v4i32 _1, v4i32 _2) { return __lsx_vssrlrni_w_d(_1, _2, 1); }
5462673c5308Schenli // CHECK-LABEL: @vssrlrni_d_q(
5463673c5308Schenli // CHECK-NEXT:  entry:
5464*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5465*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5466*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrlrni.d.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
5467*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5468*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5469673c5308Schenli //
vssrlrni_d_q(v2i64 _1,v2i64 _2)5470673c5308Schenli v2i64 vssrlrni_d_q(v2i64 _1, v2i64 _2) { return __lsx_vssrlrni_d_q(_1, _2, 1); }
5471673c5308Schenli // CHECK-LABEL: @vssrlrni_bu_h(
5472673c5308Schenli // CHECK-NEXT:  entry:
5473*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5474*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5475*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrlrni.bu.h(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
5476*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5477*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5478673c5308Schenli //
vssrlrni_bu_h(v16u8 _1,v16i8 _2)5479673c5308Schenli v16u8 vssrlrni_bu_h(v16u8 _1, v16i8 _2) {
5480673c5308Schenli   return __lsx_vssrlrni_bu_h(_1, _2, 1);
5481673c5308Schenli }
5482673c5308Schenli // CHECK-LABEL: @vssrlrni_hu_w(
5483673c5308Schenli // CHECK-NEXT:  entry:
5484*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5485*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5486*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrlrni.hu.w(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
5487*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5488*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5489673c5308Schenli //
vssrlrni_hu_w(v8u16 _1,v8i16 _2)5490673c5308Schenli v8u16 vssrlrni_hu_w(v8u16 _1, v8i16 _2) {
5491673c5308Schenli   return __lsx_vssrlrni_hu_w(_1, _2, 1);
5492673c5308Schenli }
5493673c5308Schenli // CHECK-LABEL: @vssrlrni_wu_d(
5494673c5308Schenli // CHECK-NEXT:  entry:
5495*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5496*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5497*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrlrni.wu.d(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5498*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5499*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5500673c5308Schenli //
vssrlrni_wu_d(v4u32 _1,v4i32 _2)5501673c5308Schenli v4u32 vssrlrni_wu_d(v4u32 _1, v4i32 _2) {
5502673c5308Schenli   return __lsx_vssrlrni_wu_d(_1, _2, 1);
5503673c5308Schenli }
5504673c5308Schenli // CHECK-LABEL: @vssrlrni_du_q(
5505673c5308Schenli // CHECK-NEXT:  entry:
5506*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5507*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5508*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrlrni.du.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
5509*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5510*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5511673c5308Schenli //
vssrlrni_du_q(v2u64 _1,v2i64 _2)5512673c5308Schenli v2u64 vssrlrni_du_q(v2u64 _1, v2i64 _2) {
5513673c5308Schenli   return __lsx_vssrlrni_du_q(_1, _2, 1);
5514673c5308Schenli }
5515673c5308Schenli // CHECK-LABEL: @vsrani_b_h(
5516673c5308Schenli // CHECK-NEXT:  entry:
5517*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5518*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5519*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrani.b.h(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
5520*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5521*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5522673c5308Schenli //
vsrani_b_h(v16i8 _1,v16i8 _2)5523673c5308Schenli v16i8 vsrani_b_h(v16i8 _1, v16i8 _2) { return __lsx_vsrani_b_h(_1, _2, 1); }
5524673c5308Schenli // CHECK-LABEL: @vsrani_h_w(
5525673c5308Schenli // CHECK-NEXT:  entry:
5526*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5527*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5528*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrani.h.w(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
5529*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5530*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5531673c5308Schenli //
vsrani_h_w(v8i16 _1,v8i16 _2)5532673c5308Schenli v8i16 vsrani_h_w(v8i16 _1, v8i16 _2) { return __lsx_vsrani_h_w(_1, _2, 1); }
5533673c5308Schenli // CHECK-LABEL: @vsrani_w_d(
5534673c5308Schenli // CHECK-NEXT:  entry:
5535*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5536*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5537*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrani.w.d(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5538*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5539*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5540673c5308Schenli //
vsrani_w_d(v4i32 _1,v4i32 _2)5541673c5308Schenli v4i32 vsrani_w_d(v4i32 _1, v4i32 _2) { return __lsx_vsrani_w_d(_1, _2, 1); }
5542673c5308Schenli // CHECK-LABEL: @vsrani_d_q(
5543673c5308Schenli // CHECK-NEXT:  entry:
5544*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5545*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5546*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrani.d.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
5547*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5548*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5549673c5308Schenli //
vsrani_d_q(v2i64 _1,v2i64 _2)5550673c5308Schenli v2i64 vsrani_d_q(v2i64 _1, v2i64 _2) { return __lsx_vsrani_d_q(_1, _2, 1); }
5551673c5308Schenli // CHECK-LABEL: @vsrarni_b_h(
5552673c5308Schenli // CHECK-NEXT:  entry:
5553*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5554*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5555*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrarni.b.h(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
5556*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5557*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5558673c5308Schenli //
vsrarni_b_h(v16i8 _1,v16i8 _2)5559673c5308Schenli v16i8 vsrarni_b_h(v16i8 _1, v16i8 _2) { return __lsx_vsrarni_b_h(_1, _2, 1); }
5560673c5308Schenli // CHECK-LABEL: @vsrarni_h_w(
5561673c5308Schenli // CHECK-NEXT:  entry:
5562*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5563*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5564*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrarni.h.w(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
5565*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5566*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5567673c5308Schenli //
vsrarni_h_w(v8i16 _1,v8i16 _2)5568673c5308Schenli v8i16 vsrarni_h_w(v8i16 _1, v8i16 _2) { return __lsx_vsrarni_h_w(_1, _2, 1); }
5569673c5308Schenli // CHECK-LABEL: @vsrarni_w_d(
5570673c5308Schenli // CHECK-NEXT:  entry:
5571*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5572*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5573*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrarni.w.d(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5574*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5575*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5576673c5308Schenli //
vsrarni_w_d(v4i32 _1,v4i32 _2)5577673c5308Schenli v4i32 vsrarni_w_d(v4i32 _1, v4i32 _2) { return __lsx_vsrarni_w_d(_1, _2, 1); }
5578673c5308Schenli // CHECK-LABEL: @vsrarni_d_q(
5579673c5308Schenli // CHECK-NEXT:  entry:
5580*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5581*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5582*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrarni.d.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
5583*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5584*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5585673c5308Schenli //
vsrarni_d_q(v2i64 _1,v2i64 _2)5586673c5308Schenli v2i64 vsrarni_d_q(v2i64 _1, v2i64 _2) { return __lsx_vsrarni_d_q(_1, _2, 1); }
5587673c5308Schenli // CHECK-LABEL: @vssrani_b_h(
5588673c5308Schenli // CHECK-NEXT:  entry:
5589*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5590*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5591*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrani.b.h(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
5592*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5593*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5594673c5308Schenli //
vssrani_b_h(v16i8 _1,v16i8 _2)5595673c5308Schenli v16i8 vssrani_b_h(v16i8 _1, v16i8 _2) { return __lsx_vssrani_b_h(_1, _2, 1); }
5596673c5308Schenli // CHECK-LABEL: @vssrani_h_w(
5597673c5308Schenli // CHECK-NEXT:  entry:
5598*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5599*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5600*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrani.h.w(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
5601*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5602*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5603673c5308Schenli //
vssrani_h_w(v8i16 _1,v8i16 _2)5604673c5308Schenli v8i16 vssrani_h_w(v8i16 _1, v8i16 _2) { return __lsx_vssrani_h_w(_1, _2, 1); }
5605673c5308Schenli // CHECK-LABEL: @vssrani_w_d(
5606673c5308Schenli // CHECK-NEXT:  entry:
5607*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5608*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5609*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrani.w.d(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5610*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5611*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5612673c5308Schenli //
vssrani_w_d(v4i32 _1,v4i32 _2)5613673c5308Schenli v4i32 vssrani_w_d(v4i32 _1, v4i32 _2) { return __lsx_vssrani_w_d(_1, _2, 1); }
5614673c5308Schenli // CHECK-LABEL: @vssrani_d_q(
5615673c5308Schenli // CHECK-NEXT:  entry:
5616*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5617*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5618*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrani.d.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
5619*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5620*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5621673c5308Schenli //
vssrani_d_q(v2i64 _1,v2i64 _2)5622673c5308Schenli v2i64 vssrani_d_q(v2i64 _1, v2i64 _2) { return __lsx_vssrani_d_q(_1, _2, 1); }
5623673c5308Schenli // CHECK-LABEL: @vssrani_bu_h(
5624673c5308Schenli // CHECK-NEXT:  entry:
5625*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5626*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5627*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrani.bu.h(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
5628*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5629*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5630673c5308Schenli //
vssrani_bu_h(v16u8 _1,v16i8 _2)5631673c5308Schenli v16u8 vssrani_bu_h(v16u8 _1, v16i8 _2) { return __lsx_vssrani_bu_h(_1, _2, 1); }
5632673c5308Schenli // CHECK-LABEL: @vssrani_hu_w(
5633673c5308Schenli // CHECK-NEXT:  entry:
5634*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5635*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5636*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrani.hu.w(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
5637*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5638*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5639673c5308Schenli //
vssrani_hu_w(v8u16 _1,v8i16 _2)5640673c5308Schenli v8u16 vssrani_hu_w(v8u16 _1, v8i16 _2) { return __lsx_vssrani_hu_w(_1, _2, 1); }
5641673c5308Schenli // CHECK-LABEL: @vssrani_wu_d(
5642673c5308Schenli // CHECK-NEXT:  entry:
5643*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5644*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5645*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrani.wu.d(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5646*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5647*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5648673c5308Schenli //
vssrani_wu_d(v4u32 _1,v4i32 _2)5649673c5308Schenli v4u32 vssrani_wu_d(v4u32 _1, v4i32 _2) { return __lsx_vssrani_wu_d(_1, _2, 1); }
5650673c5308Schenli // CHECK-LABEL: @vssrani_du_q(
5651673c5308Schenli // CHECK-NEXT:  entry:
5652*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5653*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5654*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrani.du.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
5655*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5656*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5657673c5308Schenli //
vssrani_du_q(v2u64 _1,v2i64 _2)5658673c5308Schenli v2u64 vssrani_du_q(v2u64 _1, v2i64 _2) { return __lsx_vssrani_du_q(_1, _2, 1); }
5659673c5308Schenli // CHECK-LABEL: @vssrarni_b_h(
5660673c5308Schenli // CHECK-NEXT:  entry:
5661*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5662*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5663*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrarni.b.h(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
5664*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5665*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5666673c5308Schenli //
vssrarni_b_h(v16i8 _1,v16i8 _2)5667673c5308Schenli v16i8 vssrarni_b_h(v16i8 _1, v16i8 _2) { return __lsx_vssrarni_b_h(_1, _2, 1); }
5668673c5308Schenli // CHECK-LABEL: @vssrarni_h_w(
5669673c5308Schenli // CHECK-NEXT:  entry:
5670*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5671*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5672*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrarni.h.w(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
5673*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5674*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5675673c5308Schenli //
vssrarni_h_w(v8i16 _1,v8i16 _2)5676673c5308Schenli v8i16 vssrarni_h_w(v8i16 _1, v8i16 _2) { return __lsx_vssrarni_h_w(_1, _2, 1); }
5677673c5308Schenli // CHECK-LABEL: @vssrarni_w_d(
5678673c5308Schenli // CHECK-NEXT:  entry:
5679*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5680*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5681*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrarni.w.d(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5682*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5683*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5684673c5308Schenli //
vssrarni_w_d(v4i32 _1,v4i32 _2)5685673c5308Schenli v4i32 vssrarni_w_d(v4i32 _1, v4i32 _2) { return __lsx_vssrarni_w_d(_1, _2, 1); }
5686673c5308Schenli // CHECK-LABEL: @vssrarni_d_q(
5687673c5308Schenli // CHECK-NEXT:  entry:
5688*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5689*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5690*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrarni.d.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
5691*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5692*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5693673c5308Schenli //
vssrarni_d_q(v2i64 _1,v2i64 _2)5694673c5308Schenli v2i64 vssrarni_d_q(v2i64 _1, v2i64 _2) { return __lsx_vssrarni_d_q(_1, _2, 1); }
5695673c5308Schenli // CHECK-LABEL: @vssrarni_bu_h(
5696673c5308Schenli // CHECK-NEXT:  entry:
5697*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5698*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5699*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrarni.bu.h(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], i32 1)
5700*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5701*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5702673c5308Schenli //
vssrarni_bu_h(v16u8 _1,v16i8 _2)5703673c5308Schenli v16u8 vssrarni_bu_h(v16u8 _1, v16i8 _2) {
5704673c5308Schenli   return __lsx_vssrarni_bu_h(_1, _2, 1);
5705673c5308Schenli }
5706673c5308Schenli // CHECK-LABEL: @vssrarni_hu_w(
5707673c5308Schenli // CHECK-NEXT:  entry:
5708*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5709*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5710*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrarni.hu.w(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], i32 1)
5711*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5712*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5713673c5308Schenli //
vssrarni_hu_w(v8u16 _1,v8i16 _2)5714673c5308Schenli v8u16 vssrarni_hu_w(v8u16 _1, v8i16 _2) {
5715673c5308Schenli   return __lsx_vssrarni_hu_w(_1, _2, 1);
5716673c5308Schenli }
5717673c5308Schenli // CHECK-LABEL: @vssrarni_wu_d(
5718673c5308Schenli // CHECK-NEXT:  entry:
5719*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5720*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5721*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrarni.wu.d(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5722*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5723*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5724673c5308Schenli //
vssrarni_wu_d(v4u32 _1,v4i32 _2)5725673c5308Schenli v4u32 vssrarni_wu_d(v4u32 _1, v4i32 _2) {
5726673c5308Schenli   return __lsx_vssrarni_wu_d(_1, _2, 1);
5727673c5308Schenli }
5728673c5308Schenli // CHECK-LABEL: @vssrarni_du_q(
5729673c5308Schenli // CHECK-NEXT:  entry:
5730*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5731*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5732*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrarni.du.q(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]], i32 1)
5733*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5734*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5735673c5308Schenli //
vssrarni_du_q(v2u64 _1,v2i64 _2)5736673c5308Schenli v2u64 vssrarni_du_q(v2u64 _1, v2i64 _2) {
5737673c5308Schenli   return __lsx_vssrarni_du_q(_1, _2, 1);
5738673c5308Schenli }
5739673c5308Schenli // CHECK-LABEL: @vpermi_w(
5740673c5308Schenli // CHECK-NEXT:  entry:
5741*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5742*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5743*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vpermi.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], i32 1)
5744*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5745*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5746673c5308Schenli //
vpermi_w(v4i32 _1,v4i32 _2)5747673c5308Schenli v4i32 vpermi_w(v4i32 _1, v4i32 _2) { return __lsx_vpermi_w(_1, _2, 1); }
5748673c5308Schenli // CHECK-LABEL: @vld(
5749673c5308Schenli // CHECK-NEXT:  entry:
5750673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vld(ptr [[_1:%.*]], i32 1)
5751*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to i128
5752*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
5753673c5308Schenli //
vld(void * _1)5754673c5308Schenli v16i8 vld(void *_1) { return __lsx_vld(_1, 1); }
5755673c5308Schenli // CHECK-LABEL: @vst(
5756673c5308Schenli // CHECK-NEXT:  entry:
5757*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5758*0e01c72cSyjijd // CHECK-NEXT:    tail call void @llvm.loongarch.lsx.vst(<16 x i8> [[TMP0]], ptr [[_2:%.*]], i32 1)
5759673c5308Schenli // CHECK-NEXT:    ret void
5760673c5308Schenli //
vst(v16i8 _1,void * _2)5761673c5308Schenli void vst(v16i8 _1, void *_2) { return __lsx_vst(_1, _2, 1); }
5762673c5308Schenli // CHECK-LABEL: @vssrlrn_b_h(
5763673c5308Schenli // CHECK-NEXT:  entry:
5764*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5765*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5766*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrlrn.b.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
5767*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5768*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5769673c5308Schenli //
vssrlrn_b_h(v8i16 _1,v8i16 _2)5770673c5308Schenli v16i8 vssrlrn_b_h(v8i16 _1, v8i16 _2) { return __lsx_vssrlrn_b_h(_1, _2); }
5771673c5308Schenli // CHECK-LABEL: @vssrlrn_h_w(
5772673c5308Schenli // CHECK-NEXT:  entry:
5773*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5774*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5775*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrlrn.h.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
5776*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5777*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5778673c5308Schenli //
vssrlrn_h_w(v4i32 _1,v4i32 _2)5779673c5308Schenli v8i16 vssrlrn_h_w(v4i32 _1, v4i32 _2) { return __lsx_vssrlrn_h_w(_1, _2); }
5780673c5308Schenli // CHECK-LABEL: @vssrlrn_w_d(
5781673c5308Schenli // CHECK-NEXT:  entry:
5782*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5783*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5784*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrlrn.w.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
5785*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5786*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5787673c5308Schenli //
vssrlrn_w_d(v2i64 _1,v2i64 _2)5788673c5308Schenli v4i32 vssrlrn_w_d(v2i64 _1, v2i64 _2) { return __lsx_vssrlrn_w_d(_1, _2); }
5789673c5308Schenli // CHECK-LABEL: @vssrln_b_h(
5790673c5308Schenli // CHECK-NEXT:  entry:
5791*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5792*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <8 x i16>
5793*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrln.b.h(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
5794*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5795*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5796673c5308Schenli //
vssrln_b_h(v8i16 _1,v8i16 _2)5797673c5308Schenli v16i8 vssrln_b_h(v8i16 _1, v8i16 _2) { return __lsx_vssrln_b_h(_1, _2); }
5798673c5308Schenli // CHECK-LABEL: @vssrln_h_w(
5799673c5308Schenli // CHECK-NEXT:  entry:
5800*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5801*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x i32>
5802*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrln.h.w(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
5803*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
5804*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5805673c5308Schenli //
vssrln_h_w(v4i32 _1,v4i32 _2)5806673c5308Schenli v8i16 vssrln_h_w(v4i32 _1, v4i32 _2) { return __lsx_vssrln_h_w(_1, _2); }
5807673c5308Schenli // CHECK-LABEL: @vssrln_w_d(
5808673c5308Schenli // CHECK-NEXT:  entry:
5809*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5810*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x i64>
5811*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrln.w.d(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
5812*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5813*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5814673c5308Schenli //
vssrln_w_d(v2i64 _1,v2i64 _2)5815673c5308Schenli v4i32 vssrln_w_d(v2i64 _1, v2i64 _2) { return __lsx_vssrln_w_d(_1, _2); }
5816673c5308Schenli // CHECK-LABEL: @vorn_v(
5817673c5308Schenli // CHECK-NEXT:  entry:
5818*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5819*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5820*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vorn.v(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
5821*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
5822*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5823673c5308Schenli //
vorn_v(v16i8 _1,v16i8 _2)5824673c5308Schenli v16i8 vorn_v(v16i8 _1, v16i8 _2) { return __lsx_vorn_v(_1, _2); }
5825673c5308Schenli // CHECK-LABEL: @vldi(
5826673c5308Schenli // CHECK-NEXT:  entry:
5827673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vldi(i32 1)
5828*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to i128
5829*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
5830673c5308Schenli //
vldi()5831673c5308Schenli v2i64 vldi() { return __lsx_vldi(1); }
5832673c5308Schenli // CHECK-LABEL: @vshuf_b(
5833673c5308Schenli // CHECK-NEXT:  entry:
5834*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5835*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <16 x i8>
5836*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast i128 [[_3_COERCE:%.*]] to <16 x i8>
5837*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vshuf.b(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
5838*0e01c72cSyjijd // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
5839*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP4]]
5840673c5308Schenli //
vshuf_b(v16i8 _1,v16i8 _2,v16i8 _3)5841673c5308Schenli v16i8 vshuf_b(v16i8 _1, v16i8 _2, v16i8 _3) {
5842673c5308Schenli   return __lsx_vshuf_b(_1, _2, _3);
5843673c5308Schenli }
5844673c5308Schenli // CHECK-LABEL: @vldx(
5845673c5308Schenli // CHECK-NEXT:  entry:
5846673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vldx(ptr [[_1:%.*]], i64 1)
5847*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to i128
5848*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
5849673c5308Schenli //
vldx(void * _1)5850673c5308Schenli v16i8 vldx(void *_1) { return __lsx_vldx(_1, 1); }
5851673c5308Schenli // CHECK-LABEL: @vstx(
5852673c5308Schenli // CHECK-NEXT:  entry:
5853*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5854*0e01c72cSyjijd // CHECK-NEXT:    tail call void @llvm.loongarch.lsx.vstx(<16 x i8> [[TMP0]], ptr [[_2:%.*]], i64 1)
5855673c5308Schenli // CHECK-NEXT:    ret void
5856673c5308Schenli //
vstx(v16i8 _1,void * _2)5857673c5308Schenli void vstx(v16i8 _1, void *_2) { return __lsx_vstx(_1, _2, 1); }
5858673c5308Schenli // CHECK-LABEL: @vextl_qu_du(
5859673c5308Schenli // CHECK-NEXT:  entry:
5860*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5861*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vextl.qu.du(<2 x i64> [[TMP0]])
5862*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
5863*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP2]]
5864673c5308Schenli //
vextl_qu_du(v2u64 _1)5865673c5308Schenli v2u64 vextl_qu_du(v2u64 _1) { return __lsx_vextl_qu_du(_1); }
5866673c5308Schenli // CHECK-LABEL: @bnz_b(
5867673c5308Schenli // CHECK-NEXT:  entry:
5868*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5869*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.bnz.b(<16 x i8> [[TMP0]])
5870*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
5871673c5308Schenli //
// Branch-condition test: __lsx_bnz_b must lower to the i32-returning
// @llvm.loongarch.lsx.bnz.b intrinsic (CHECK lines autogenerated).
// The sibling bnz_d/h/v/w and bz_* wrappers below follow the same pattern.
bnz_b(v16u8 _1)5872673c5308Schenli int bnz_b(v16u8 _1) { return __lsx_bnz_b(_1); }
5873673c5308Schenli // CHECK-LABEL: @bnz_d(
5874673c5308Schenli // CHECK-NEXT:  entry:
5875*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5876*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.bnz.d(<2 x i64> [[TMP0]])
5877*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
5878673c5308Schenli //
bnz_d(v2u64 _1)5879673c5308Schenli int bnz_d(v2u64 _1) { return __lsx_bnz_d(_1); }
5880673c5308Schenli // CHECK-LABEL: @bnz_h(
5881673c5308Schenli // CHECK-NEXT:  entry:
5882*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5883*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.bnz.h(<8 x i16> [[TMP0]])
5884*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
5885673c5308Schenli //
bnz_h(v8u16 _1)5886673c5308Schenli int bnz_h(v8u16 _1) { return __lsx_bnz_h(_1); }
5887673c5308Schenli // CHECK-LABEL: @bnz_v(
5888673c5308Schenli // CHECK-NEXT:  entry:
5889*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5890*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.bnz.v(<16 x i8> [[TMP0]])
5891*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
5892673c5308Schenli //
bnz_v(v16u8 _1)5893673c5308Schenli int bnz_v(v16u8 _1) { return __lsx_bnz_v(_1); }
5894673c5308Schenli // CHECK-LABEL: @bnz_w(
5895673c5308Schenli // CHECK-NEXT:  entry:
5896*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5897*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.bnz.w(<4 x i32> [[TMP0]])
5898*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
5899673c5308Schenli //
bnz_w(v4u32 _1)5900673c5308Schenli int bnz_w(v4u32 _1) { return __lsx_bnz_w(_1); }
5901673c5308Schenli // CHECK-LABEL: @bz_b(
5902673c5308Schenli // CHECK-NEXT:  entry:
5903*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5904*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.bz.b(<16 x i8> [[TMP0]])
5905*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
5906673c5308Schenli //
// Branch-condition test: __lsx_bz_b must lower to @llvm.loongarch.lsx.bz.b
// (CHECK lines autogenerated by update_cc_test_checks.py — do not hand-edit).
bz_b(v16u8 _1)5907673c5308Schenli int bz_b(v16u8 _1) { return __lsx_bz_b(_1); }
5908673c5308Schenli // CHECK-LABEL: @bz_d(
5909673c5308Schenli // CHECK-NEXT:  entry:
5910*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x i64>
5911*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.bz.d(<2 x i64> [[TMP0]])
5912*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
5913673c5308Schenli //
bz_d(v2u64 _1)5914673c5308Schenli int bz_d(v2u64 _1) { return __lsx_bz_d(_1); }
5915673c5308Schenli // CHECK-LABEL: @bz_h(
5916673c5308Schenli // CHECK-NEXT:  entry:
5917*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <8 x i16>
5918*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.bz.h(<8 x i16> [[TMP0]])
5919*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
5920673c5308Schenli //
bz_h(v8u16 _1)5921673c5308Schenli int bz_h(v8u16 _1) { return __lsx_bz_h(_1); }
5922673c5308Schenli // CHECK-LABEL: @bz_v(
5923673c5308Schenli // CHECK-NEXT:  entry:
5924*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <16 x i8>
5925*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.bz.v(<16 x i8> [[TMP0]])
5926*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
5927673c5308Schenli //
bz_v(v16u8 _1)5928673c5308Schenli int bz_v(v16u8 _1) { return __lsx_bz_v(_1); }
5929673c5308Schenli // CHECK-LABEL: @bz_w(
5930673c5308Schenli // CHECK-NEXT:  entry:
5931*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x i32>
5932*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.lsx.bz.w(<4 x i32> [[TMP0]])
5933*0e01c72cSyjijd // CHECK-NEXT:    ret i32 [[TMP1]]
5934673c5308Schenli //
bz_w(v4u32 _1)5935673c5308Schenli int bz_w(v4u32 _1) { return __lsx_bz_w(_1); }
5936673c5308Schenli // CHECK-LABEL: @vfcmp_caf_d(
5937673c5308Schenli // CHECK-NEXT:  entry:
5938*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
5939*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
5940*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.caf.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
5941*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5942*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5943673c5308Schenli //
// First of the vfcmp alias tests: __lsx_vfcmp_caf_d must lower to
// @llvm.loongarch.lsx.vfcmp.caf.d, producing a <2 x i64> mask from two
// <2 x double> operands. The remaining vfcmp_* wrappers below differ only
// in condition code and element type. CHECK lines are autogenerated.
vfcmp_caf_d(v2f64 _1,v2f64 _2)5944673c5308Schenli v2i64 vfcmp_caf_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_caf_d(_1, _2); }
5945673c5308Schenli // CHECK-LABEL: @vfcmp_caf_s(
5946673c5308Schenli // CHECK-NEXT:  entry:
5947*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
5948*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
5949*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.caf.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
5950*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5951*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5952673c5308Schenli //
vfcmp_caf_s(v4f32 _1,v4f32 _2)5953673c5308Schenli v4i32 vfcmp_caf_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_caf_s(_1, _2); }
5954673c5308Schenli // CHECK-LABEL: @vfcmp_ceq_d(
5955673c5308Schenli // CHECK-NEXT:  entry:
5956*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
5957*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
5958*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.ceq.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
5959*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5960*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5961673c5308Schenli //
vfcmp_ceq_d(v2f64 _1,v2f64 _2)5962673c5308Schenli v2i64 vfcmp_ceq_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_ceq_d(_1, _2); }
5963673c5308Schenli // CHECK-LABEL: @vfcmp_ceq_s(
5964673c5308Schenli // CHECK-NEXT:  entry:
5965*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
5966*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
5967*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.ceq.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
5968*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5969*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5970673c5308Schenli //
vfcmp_ceq_s(v4f32 _1,v4f32 _2)5971673c5308Schenli v4i32 vfcmp_ceq_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_ceq_s(_1, _2); }
5972673c5308Schenli // CHECK-LABEL: @vfcmp_cle_d(
5973673c5308Schenli // CHECK-NEXT:  entry:
5974*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
5975*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
5976*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cle.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
5977*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5978*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5979673c5308Schenli //
vfcmp_cle_d(v2f64 _1,v2f64 _2)5980673c5308Schenli v2i64 vfcmp_cle_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_cle_d(_1, _2); }
5981673c5308Schenli // CHECK-LABEL: @vfcmp_cle_s(
5982673c5308Schenli // CHECK-NEXT:  entry:
5983*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
5984*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
5985*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cle.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
5986*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
5987*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5988673c5308Schenli //
vfcmp_cle_s(v4f32 _1,v4f32 _2)5989673c5308Schenli v4i32 vfcmp_cle_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_cle_s(_1, _2); }
5990673c5308Schenli // CHECK-LABEL: @vfcmp_clt_d(
5991673c5308Schenli // CHECK-NEXT:  entry:
5992*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
5993*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
5994*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.clt.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
5995*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
5996*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
5997673c5308Schenli //
vfcmp_clt_d(v2f64 _1,v2f64 _2)5998673c5308Schenli v2i64 vfcmp_clt_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_clt_d(_1, _2); }
5999673c5308Schenli // CHECK-LABEL: @vfcmp_clt_s(
6000673c5308Schenli // CHECK-NEXT:  entry:
6001*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6002*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6003*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.clt.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6004*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6005*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6006673c5308Schenli //
vfcmp_clt_s(v4f32 _1,v4f32 _2)6007673c5308Schenli v4i32 vfcmp_clt_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_clt_s(_1, _2); }
6008673c5308Schenli // CHECK-LABEL: @vfcmp_cne_d(
6009673c5308Schenli // CHECK-NEXT:  entry:
6010*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6011*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6012*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cne.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6013*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6014*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6015673c5308Schenli //
vfcmp_cne_d(v2f64 _1,v2f64 _2)6016673c5308Schenli v2i64 vfcmp_cne_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_cne_d(_1, _2); }
6017673c5308Schenli // CHECK-LABEL: @vfcmp_cne_s(
6018673c5308Schenli // CHECK-NEXT:  entry:
6019*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6020*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6021*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cne.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6022*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6023*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6024673c5308Schenli //
vfcmp_cne_s(v4f32 _1,v4f32 _2)6025673c5308Schenli v4i32 vfcmp_cne_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_cne_s(_1, _2); }
6026673c5308Schenli // CHECK-LABEL: @vfcmp_cor_d(
6027673c5308Schenli // CHECK-NEXT:  entry:
6028*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6029*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6030*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cor.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6031*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6032*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6033673c5308Schenli //
vfcmp_cor_d(v2f64 _1,v2f64 _2)6034673c5308Schenli v2i64 vfcmp_cor_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_cor_d(_1, _2); }
6035673c5308Schenli // CHECK-LABEL: @vfcmp_cor_s(
6036673c5308Schenli // CHECK-NEXT:  entry:
6037*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6038*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6039*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cor.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6040*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6041*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6042673c5308Schenli //
vfcmp_cor_s(v4f32 _1,v4f32 _2)6043673c5308Schenli v4i32 vfcmp_cor_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_cor_s(_1, _2); }
6044673c5308Schenli // CHECK-LABEL: @vfcmp_cueq_d(
6045673c5308Schenli // CHECK-NEXT:  entry:
6046*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6047*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6048*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cueq.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6049*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6050*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6051673c5308Schenli //
vfcmp_cueq_d(v2f64 _1,v2f64 _2)6052673c5308Schenli v2i64 vfcmp_cueq_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_cueq_d(_1, _2); }
6053673c5308Schenli // CHECK-LABEL: @vfcmp_cueq_s(
6054673c5308Schenli // CHECK-NEXT:  entry:
6055*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6056*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6057*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cueq.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6058*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6059*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6060673c5308Schenli //
vfcmp_cueq_s(v4f32 _1,v4f32 _2)6061673c5308Schenli v4i32 vfcmp_cueq_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_cueq_s(_1, _2); }
6062673c5308Schenli // CHECK-LABEL: @vfcmp_cule_d(
6063673c5308Schenli // CHECK-NEXT:  entry:
6064*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6065*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6066*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cule.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6067*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6068*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6069673c5308Schenli //
vfcmp_cule_d(v2f64 _1,v2f64 _2)6070673c5308Schenli v2i64 vfcmp_cule_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_cule_d(_1, _2); }
6071673c5308Schenli // CHECK-LABEL: @vfcmp_cule_s(
6072673c5308Schenli // CHECK-NEXT:  entry:
6073*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6074*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6075*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cule.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6076*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6077*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6078673c5308Schenli //
vfcmp_cule_s(v4f32 _1,v4f32 _2)6079673c5308Schenli v4i32 vfcmp_cule_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_cule_s(_1, _2); }
6080673c5308Schenli // CHECK-LABEL: @vfcmp_cult_d(
6081673c5308Schenli // CHECK-NEXT:  entry:
6082*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6083*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6084*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cult.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6085*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6086*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6087673c5308Schenli //
vfcmp_cult_d(v2f64 _1,v2f64 _2)6088673c5308Schenli v2i64 vfcmp_cult_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_cult_d(_1, _2); }
6089673c5308Schenli // CHECK-LABEL: @vfcmp_cult_s(
6090673c5308Schenli // CHECK-NEXT:  entry:
6091*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6092*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6093*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cult.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6094*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6095*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6096673c5308Schenli //
vfcmp_cult_s(v4f32 _1,v4f32 _2)6097673c5308Schenli v4i32 vfcmp_cult_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_cult_s(_1, _2); }
6098673c5308Schenli // CHECK-LABEL: @vfcmp_cun_d(
6099673c5308Schenli // CHECK-NEXT:  entry:
6100*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6101*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6102*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cun.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6103*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6104*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6105673c5308Schenli //
vfcmp_cun_d(v2f64 _1,v2f64 _2)6106673c5308Schenli v2i64 vfcmp_cun_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_cun_d(_1, _2); }
6107673c5308Schenli // CHECK-LABEL: @vfcmp_cune_d(
6108673c5308Schenli // CHECK-NEXT:  entry:
6109*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6110*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6111*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cune.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6112*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6113*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6114673c5308Schenli //
vfcmp_cune_d(v2f64 _1,v2f64 _2)6115673c5308Schenli v2i64 vfcmp_cune_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_cune_d(_1, _2); }
6116673c5308Schenli // CHECK-LABEL: @vfcmp_cune_s(
6117673c5308Schenli // CHECK-NEXT:  entry:
6118*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6119*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6120*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cune.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6121*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6122*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6123673c5308Schenli //
vfcmp_cune_s(v4f32 _1,v4f32 _2)6124673c5308Schenli v4i32 vfcmp_cune_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_cune_s(_1, _2); }
6125673c5308Schenli // CHECK-LABEL: @vfcmp_cun_s(
6126673c5308Schenli // CHECK-NEXT:  entry:
6127*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6128*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6129*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cun.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6130*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6131*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6132673c5308Schenli //
vfcmp_cun_s(v4f32 _1,v4f32 _2)6133673c5308Schenli v4i32 vfcmp_cun_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_cun_s(_1, _2); }
6134673c5308Schenli // CHECK-LABEL: @vfcmp_saf_d(
6135673c5308Schenli // CHECK-NEXT:  entry:
6136*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6137*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6138*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.saf.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6139*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6140*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6141673c5308Schenli //
vfcmp_saf_d(v2f64 _1,v2f64 _2)6142673c5308Schenli v2i64 vfcmp_saf_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_saf_d(_1, _2); }
6143673c5308Schenli // CHECK-LABEL: @vfcmp_saf_s(
6144673c5308Schenli // CHECK-NEXT:  entry:
6145*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6146*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6147*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.saf.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6148*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6149*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6150673c5308Schenli //
vfcmp_saf_s(v4f32 _1,v4f32 _2)6151673c5308Schenli v4i32 vfcmp_saf_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_saf_s(_1, _2); }
6152673c5308Schenli // CHECK-LABEL: @vfcmp_seq_d(
6153673c5308Schenli // CHECK-NEXT:  entry:
6154*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6155*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6156*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.seq.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6157*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6158*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6159673c5308Schenli //
vfcmp_seq_d(v2f64 _1,v2f64 _2)6160673c5308Schenli v2i64 vfcmp_seq_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_seq_d(_1, _2); }
6161673c5308Schenli // CHECK-LABEL: @vfcmp_seq_s(
6162673c5308Schenli // CHECK-NEXT:  entry:
6163*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6164*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6165*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.seq.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6166*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6167*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6168673c5308Schenli //
vfcmp_seq_s(v4f32 _1,v4f32 _2)6169673c5308Schenli v4i32 vfcmp_seq_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_seq_s(_1, _2); }
6170673c5308Schenli // CHECK-LABEL: @vfcmp_sle_d(
6171673c5308Schenli // CHECK-NEXT:  entry:
6172*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6173*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6174*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sle.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6175*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6176*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6177673c5308Schenli //
vfcmp_sle_d(v2f64 _1,v2f64 _2)6178673c5308Schenli v2i64 vfcmp_sle_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_sle_d(_1, _2); }
6179673c5308Schenli // CHECK-LABEL: @vfcmp_sle_s(
6180673c5308Schenli // CHECK-NEXT:  entry:
6181*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6182*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6183*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sle.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6184*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6185*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6186673c5308Schenli //
vfcmp_sle_s(v4f32 _1,v4f32 _2)6187673c5308Schenli v4i32 vfcmp_sle_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_sle_s(_1, _2); }
6188673c5308Schenli // CHECK-LABEL: @vfcmp_slt_d(
6189673c5308Schenli // CHECK-NEXT:  entry:
6190*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6191*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6192*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.slt.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6193*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6194*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6195673c5308Schenli //
vfcmp_slt_d(v2f64 _1,v2f64 _2)6196673c5308Schenli v2i64 vfcmp_slt_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_slt_d(_1, _2); }
6197673c5308Schenli // CHECK-LABEL: @vfcmp_slt_s(
6198673c5308Schenli // CHECK-NEXT:  entry:
6199*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6200*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6201*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.slt.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6202*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6203*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6204673c5308Schenli //
vfcmp_slt_s(v4f32 _1,v4f32 _2)6205673c5308Schenli v4i32 vfcmp_slt_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_slt_s(_1, _2); }
6206673c5308Schenli // CHECK-LABEL: @vfcmp_sne_d(
6207673c5308Schenli // CHECK-NEXT:  entry:
6208*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6209*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6210*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sne.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6211*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6212*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6213673c5308Schenli //
vfcmp_sne_d(v2f64 _1,v2f64 _2)6214673c5308Schenli v2i64 vfcmp_sne_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_sne_d(_1, _2); }
6215673c5308Schenli // CHECK-LABEL: @vfcmp_sne_s(
6216673c5308Schenli // CHECK-NEXT:  entry:
6217*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6218*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6219*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sne.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6220*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6221*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6222673c5308Schenli //
vfcmp_sne_s(v4f32 _1,v4f32 _2)6223673c5308Schenli v4i32 vfcmp_sne_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_sne_s(_1, _2); }
6224673c5308Schenli // CHECK-LABEL: @vfcmp_sor_d(
6225673c5308Schenli // CHECK-NEXT:  entry:
6226*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6227*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6228*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sor.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6229*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6230*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6231673c5308Schenli //
vfcmp_sor_d(v2f64 _1,v2f64 _2)6232673c5308Schenli v2i64 vfcmp_sor_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_sor_d(_1, _2); }
6233673c5308Schenli // CHECK-LABEL: @vfcmp_sor_s(
6234673c5308Schenli // CHECK-NEXT:  entry:
6235*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6236*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6237*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sor.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6238*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6239*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6240673c5308Schenli //
vfcmp_sor_s(v4f32 _1,v4f32 _2)6241673c5308Schenli v4i32 vfcmp_sor_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_sor_s(_1, _2); }
6242673c5308Schenli // CHECK-LABEL: @vfcmp_sueq_d(
6243673c5308Schenli // CHECK-NEXT:  entry:
6244*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6245*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6246*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sueq.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6247*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6248*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6249673c5308Schenli //
vfcmp_sueq_d(v2f64 _1,v2f64 _2)6250673c5308Schenli v2i64 vfcmp_sueq_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_sueq_d(_1, _2); }
6251673c5308Schenli // CHECK-LABEL: @vfcmp_sueq_s(
6252673c5308Schenli // CHECK-NEXT:  entry:
6253*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6254*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6255*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sueq.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6256*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6257*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6258673c5308Schenli //
vfcmp_sueq_s(v4f32 _1,v4f32 _2)6259673c5308Schenli v4i32 vfcmp_sueq_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_sueq_s(_1, _2); }
6260673c5308Schenli // CHECK-LABEL: @vfcmp_sule_d(
6261673c5308Schenli // CHECK-NEXT:  entry:
6262*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6263*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6264*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sule.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6265*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6266*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6267673c5308Schenli //
vfcmp_sule_d(v2f64 _1,v2f64 _2)6268673c5308Schenli v2i64 vfcmp_sule_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_sule_d(_1, _2); }
6269673c5308Schenli // CHECK-LABEL: @vfcmp_sule_s(
6270673c5308Schenli // CHECK-NEXT:  entry:
6271*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6272*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6273*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sule.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6274*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6275*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6276673c5308Schenli //
vfcmp_sule_s(v4f32 _1,v4f32 _2)6277673c5308Schenli v4i32 vfcmp_sule_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_sule_s(_1, _2); }
6278673c5308Schenli // CHECK-LABEL: @vfcmp_sult_d(
6279673c5308Schenli // CHECK-NEXT:  entry:
6280*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6281*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6282*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sult.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6283*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6284*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6285673c5308Schenli //
vfcmp_sult_d(v2f64 _1,v2f64 _2)6286673c5308Schenli v2i64 vfcmp_sult_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_sult_d(_1, _2); }
6287673c5308Schenli // CHECK-LABEL: @vfcmp_sult_s(
6288673c5308Schenli // CHECK-NEXT:  entry:
6289*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6290*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6291*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sult.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6292*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6293*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6294673c5308Schenli //
vfcmp_sult_s(v4f32 _1,v4f32 _2)6295673c5308Schenli v4i32 vfcmp_sult_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_sult_s(_1, _2); }
6296673c5308Schenli // CHECK-LABEL: @vfcmp_sun_d(
6297673c5308Schenli // CHECK-NEXT:  entry:
6298*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6299*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6300*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sun.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6301*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6302*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6303673c5308Schenli //
vfcmp_sun_d(v2f64 _1,v2f64 _2)6304673c5308Schenli v2i64 vfcmp_sun_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_sun_d(_1, _2); }
6305673c5308Schenli // CHECK-LABEL: @vfcmp_sune_d(
6306673c5308Schenli // CHECK-NEXT:  entry:
6307*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <2 x double>
6308*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <2 x double>
6309*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sune.d(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
6310*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
6311*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6312673c5308Schenli //
vfcmp_sune_d(v2f64 _1,v2f64 _2)6313673c5308Schenli v2i64 vfcmp_sune_d(v2f64 _1, v2f64 _2) { return __lsx_vfcmp_sune_d(_1, _2); }
6314673c5308Schenli // CHECK-LABEL: @vfcmp_sune_s(
6315673c5308Schenli // CHECK-NEXT:  entry:
6316*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6317*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6318*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sune.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6319*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6320*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6321673c5308Schenli //
vfcmp_sune_s(v4f32 _1,v4f32 _2)6322673c5308Schenli v4i32 vfcmp_sune_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_sune_s(_1, _2); }
6323673c5308Schenli // CHECK-LABEL: @vfcmp_sun_s(
6324673c5308Schenli // CHECK-NEXT:  entry:
6325*0e01c72cSyjijd // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[_1_COERCE:%.*]] to <4 x float>
6326*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast i128 [[_2_COERCE:%.*]] to <4 x float>
6327*0e01c72cSyjijd // CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sun.s(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
6328*0e01c72cSyjijd // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
6329*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP3]]
6330673c5308Schenli //
vfcmp_sun_s(v4f32 _1,v4f32 _2)6331673c5308Schenli v4i32 vfcmp_sun_s(v4f32 _1, v4f32 _2) { return __lsx_vfcmp_sun_s(_1, _2); }
6332673c5308Schenli // CHECK-LABEL: @vrepli_b(
6333673c5308Schenli // CHECK-NEXT:  entry:
6334673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vrepli.b(i32 1)
6335*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to i128
6336*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
6337673c5308Schenli //
vrepli_b()6338673c5308Schenli v16i8 vrepli_b() { return __lsx_vrepli_b(1); }
6339673c5308Schenli // CHECK-LABEL: @vrepli_d(
6340673c5308Schenli // CHECK-NEXT:  entry:
6341673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vrepli.d(i32 1)
6342*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to i128
6343*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
6344673c5308Schenli //
vrepli_d()6345673c5308Schenli v2i64 vrepli_d() { return __lsx_vrepli_d(1); }
6346673c5308Schenli // CHECK-LABEL: @vrepli_h(
6347673c5308Schenli // CHECK-NEXT:  entry:
6348673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vrepli.h(i32 1)
6349*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i16> [[TMP0]] to i128
6350*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
6351673c5308Schenli //
vrepli_h()6352673c5308Schenli v8i16 vrepli_h() { return __lsx_vrepli_h(1); }
6353673c5308Schenli // CHECK-LABEL: @vrepli_w(
6354673c5308Schenli // CHECK-NEXT:  entry:
6355673c5308Schenli // CHECK-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vrepli.w(i32 1)
6356*0e01c72cSyjijd // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to i128
6357*0e01c72cSyjijd // CHECK-NEXT:    ret i128 [[TMP1]]
6358673c5308Schenli //
vrepli_w()6359673c5308Schenli v4i32 vrepli_w() { return __lsx_vrepli_w(1); }
6360