xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll (revision a6b870db091830844431f77eb47aa30fc1d70bed)
14b941ff4SPhilip Reames; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
24b941ff4SPhilip Reames; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
34b941ff4SPhilip Reames; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
44b941ff4SPhilip Reames;
54b941ff4SPhilip Reames; SABD
64b941ff4SPhilip Reames;
74b941ff4SPhilip Reames
; Signed abd: i8 operands widened to i16, sub+abs+trunc; expected to fold back
; to e8 as vmin/vmax/vsub (no widening in the generated code).
84b941ff4SPhilip Reamesdefine <8 x i8> @sabd_8b_as_16b(<8 x i8> %a, <8 x i8> %b) {
94b941ff4SPhilip Reames;
104b941ff4SPhilip Reames; CHECK-LABEL: sabd_8b_as_16b:
114b941ff4SPhilip Reames; CHECK:       # %bb.0:
124b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
13*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
14*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
15*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
164b941ff4SPhilip Reames; CHECK-NEXT:    ret
174b941ff4SPhilip Reames  %a.sext = sext <8 x i8> %a to <8 x i16>
184b941ff4SPhilip Reames  %b.sext = sext <8 x i8> %b to <8 x i16>
194b941ff4SPhilip Reames  %sub = sub <8 x i16> %a.sext, %b.sext
204b941ff4SPhilip Reames  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
214b941ff4SPhilip Reames  %trunc = trunc <8 x i16> %abs to <8 x i8>
224b941ff4SPhilip Reames  ret <8 x i8> %trunc
234b941ff4SPhilip Reames}
244b941ff4SPhilip Reames
; Same as sabd_8b_as_16b but widened through i32: the double-wide promotion
; must still collapse to the single e8 vmin/vmax/vsub sequence.
254b941ff4SPhilip Reamesdefine <8 x i8> @sabd_8b_as_32b(<8 x i8> %a, <8 x i8> %b) {
264b941ff4SPhilip Reames;
274b941ff4SPhilip Reames; CHECK-LABEL: sabd_8b_as_32b:
284b941ff4SPhilip Reames; CHECK:       # %bb.0:
29*a6b870dbSPhilip Reames; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
30*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
31*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
32*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
334b941ff4SPhilip Reames; CHECK-NEXT:    ret
344b941ff4SPhilip Reames  %a.sext = sext <8 x i8> %a to <8 x i32>
354b941ff4SPhilip Reames  %b.sext = sext <8 x i8> %b to <8 x i32>
364b941ff4SPhilip Reames  %sub = sub <8 x i32> %a.sext, %b.sext
374b941ff4SPhilip Reames  %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
384b941ff4SPhilip Reames  %trunc = trunc <8 x i32> %abs to <8 x i8>
394b941ff4SPhilip Reames  ret <8 x i8> %trunc
404b941ff4SPhilip Reames}
414b941ff4SPhilip Reames
; Signed abd on a full m1 i8 vector (16 x i8), widened through i16 in the IR;
; checks the narrowed vmin/vmax/vsub lowering at e8/m1.
424b941ff4SPhilip Reamesdefine <16 x i8> @sabd_16b(<16 x i8> %a, <16 x i8> %b) {
434b941ff4SPhilip Reames;
444b941ff4SPhilip Reames; CHECK-LABEL: sabd_16b:
454b941ff4SPhilip Reames; CHECK:       # %bb.0:
464b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
47*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
48*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
49*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
504b941ff4SPhilip Reames; CHECK-NEXT:    ret
514b941ff4SPhilip Reames  %a.sext = sext <16 x i8> %a to <16 x i16>
524b941ff4SPhilip Reames  %b.sext = sext <16 x i8> %b to <16 x i16>
534b941ff4SPhilip Reames  %sub = sub <16 x i16> %a.sext, %b.sext
544b941ff4SPhilip Reames  %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
554b941ff4SPhilip Reames  %trunc = trunc <16 x i16> %abs to <16 x i8>
564b941ff4SPhilip Reames  ret <16 x i8> %trunc
574b941ff4SPhilip Reames}
584b941ff4SPhilip Reames
; Signed abd on i16 elements widened through i32; expects vmin/vmax/vsub at e16.
594b941ff4SPhilip Reamesdefine <4 x i16> @sabd_4h(<4 x i16> %a, <4 x i16> %b) {
604b941ff4SPhilip Reames;
614b941ff4SPhilip Reames; CHECK-LABEL: sabd_4h:
624b941ff4SPhilip Reames; CHECK:       # %bb.0:
634b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
64*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
65*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
66*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
674b941ff4SPhilip Reames; CHECK-NEXT:    ret
684b941ff4SPhilip Reames  %a.sext = sext <4 x i16> %a to <4 x i32>
694b941ff4SPhilip Reames  %b.sext = sext <4 x i16> %b to <4 x i32>
704b941ff4SPhilip Reames  %sub = sub <4 x i32> %a.sext, %b.sext
714b941ff4SPhilip Reames  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
724b941ff4SPhilip Reames  %trunc = trunc <4 x i32> %abs to <4 x i16>
734b941ff4SPhilip Reames  ret <4 x i16> %trunc
744b941ff4SPhilip Reames}
754b941ff4SPhilip Reames
; Promoted-operand form: i8 inputs, i16 result. The abd is computed at e8 and
; the (non-negative) result is widened with vzext.vf2 rather than a sext.
764b941ff4SPhilip Reamesdefine <4 x i16> @sabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
774b941ff4SPhilip Reames;
784b941ff4SPhilip Reames; CHECK-LABEL: sabd_4h_promoted_ops:
794b941ff4SPhilip Reames; CHECK:       # %bb.0:
804b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
81*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
82*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
83*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v9, v8, v10
844b941ff4SPhilip Reames; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
85*a6b870dbSPhilip Reames; CHECK-NEXT:    vzext.vf2 v8, v9
864b941ff4SPhilip Reames; CHECK-NEXT:    ret
874b941ff4SPhilip Reames  %a.sext = sext <4 x i8> %a to <4 x i16>
884b941ff4SPhilip Reames  %b.sext = sext <4 x i8> %b to <4 x i16>
894b941ff4SPhilip Reames  %sub = sub <4 x i16> %a.sext, %b.sext
904b941ff4SPhilip Reames  %abs = call <4 x i16> @llvm.abs.v4i16(<4 x i16> %sub, i1 true)
914b941ff4SPhilip Reames  ret <4 x i16> %abs
924b941ff4SPhilip Reames}
934b941ff4SPhilip Reames
; Signed abd on 8 x i16 widened through i32; expects vmin/vmax/vsub at e16/m1.
944b941ff4SPhilip Reamesdefine <8 x i16> @sabd_8h(<8 x i16> %a, <8 x i16> %b) {
954b941ff4SPhilip Reames;
964b941ff4SPhilip Reames; CHECK-LABEL: sabd_8h:
974b941ff4SPhilip Reames; CHECK:       # %bb.0:
984b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
99*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
100*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
101*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
1024b941ff4SPhilip Reames; CHECK-NEXT:    ret
1034b941ff4SPhilip Reames  %a.sext = sext <8 x i16> %a to <8 x i32>
1044b941ff4SPhilip Reames  %b.sext = sext <8 x i16> %b to <8 x i32>
1054b941ff4SPhilip Reames  %sub = sub <8 x i32> %a.sext, %b.sext
1064b941ff4SPhilip Reames  %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
1074b941ff4SPhilip Reames  %trunc = trunc <8 x i32> %abs to <8 x i16>
1084b941ff4SPhilip Reames  ret <8 x i16> %trunc
1094b941ff4SPhilip Reames}
1104b941ff4SPhilip Reames
; Promoted-operand form: i8 inputs, i16 result; abd at e8 then vzext.vf2.
1114b941ff4SPhilip Reamesdefine <8 x i16> @sabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
1124b941ff4SPhilip Reames;
1134b941ff4SPhilip Reames; CHECK-LABEL: sabd_8h_promoted_ops:
1144b941ff4SPhilip Reames; CHECK:       # %bb.0:
1154b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
116*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
117*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
118*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v9, v8, v10
1194b941ff4SPhilip Reames; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
120*a6b870dbSPhilip Reames; CHECK-NEXT:    vzext.vf2 v8, v9
1214b941ff4SPhilip Reames; CHECK-NEXT:    ret
1224b941ff4SPhilip Reames  %a.sext = sext <8 x i8> %a to <8 x i16>
1234b941ff4SPhilip Reames  %b.sext = sext <8 x i8> %b to <8 x i16>
1244b941ff4SPhilip Reames  %sub = sub <8 x i16> %a.sext, %b.sext
1254b941ff4SPhilip Reames  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
1264b941ff4SPhilip Reames  ret <8 x i16> %abs
1274b941ff4SPhilip Reames}
1284b941ff4SPhilip Reames
; Signed abd on 2 x i32 widened through i64; expects vmin/vmax/vsub at e32.
1294b941ff4SPhilip Reamesdefine <2 x i32> @sabd_2s(<2 x i32> %a, <2 x i32> %b) {
1304b941ff4SPhilip Reames;
1314b941ff4SPhilip Reames; CHECK-LABEL: sabd_2s:
1324b941ff4SPhilip Reames; CHECK:       # %bb.0:
1334b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
134*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
135*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
136*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
1374b941ff4SPhilip Reames; CHECK-NEXT:    ret
1384b941ff4SPhilip Reames  %a.sext = sext <2 x i32> %a to <2 x i64>
1394b941ff4SPhilip Reames  %b.sext = sext <2 x i32> %b to <2 x i64>
1404b941ff4SPhilip Reames  %sub = sub <2 x i64> %a.sext, %b.sext
1414b941ff4SPhilip Reames  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
1424b941ff4SPhilip Reames  %trunc = trunc <2 x i64> %abs to <2 x i32>
1434b941ff4SPhilip Reames  ret <2 x i32> %trunc
1444b941ff4SPhilip Reames}
1454b941ff4SPhilip Reames
; Promoted-operand form: i16 inputs, i32 result; abd at e16 then vzext.vf2.
1464b941ff4SPhilip Reamesdefine <2 x i32> @sabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
1474b941ff4SPhilip Reames;
1484b941ff4SPhilip Reames; CHECK-LABEL: sabd_2s_promoted_ops:
1494b941ff4SPhilip Reames; CHECK:       # %bb.0:
1504b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
151*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
152*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
153*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v9, v8, v10
1544b941ff4SPhilip Reames; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
155*a6b870dbSPhilip Reames; CHECK-NEXT:    vzext.vf2 v8, v9
1564b941ff4SPhilip Reames; CHECK-NEXT:    ret
1574b941ff4SPhilip Reames  %a.sext = sext <2 x i16> %a to <2 x i32>
1584b941ff4SPhilip Reames  %b.sext = sext <2 x i16> %b to <2 x i32>
1594b941ff4SPhilip Reames  %sub = sub <2 x i32> %a.sext, %b.sext
1604b941ff4SPhilip Reames  %abs = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %sub, i1 true)
1614b941ff4SPhilip Reames  ret <2 x i32> %abs
1624b941ff4SPhilip Reames}
1634b941ff4SPhilip Reames
; Signed abd on 4 x i32 widened through i64; expects vmin/vmax/vsub at e32/m1.
1644b941ff4SPhilip Reamesdefine <4 x i32> @sabd_4s(<4 x i32> %a, <4 x i32> %b) {
1654b941ff4SPhilip Reames;
1664b941ff4SPhilip Reames; CHECK-LABEL: sabd_4s:
1674b941ff4SPhilip Reames; CHECK:       # %bb.0:
1684b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
169*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
170*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
171*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
1724b941ff4SPhilip Reames; CHECK-NEXT:    ret
1734b941ff4SPhilip Reames  %a.sext = sext <4 x i32> %a to <4 x i64>
1744b941ff4SPhilip Reames  %b.sext = sext <4 x i32> %b to <4 x i64>
1754b941ff4SPhilip Reames  %sub = sub <4 x i64> %a.sext, %b.sext
1764b941ff4SPhilip Reames  %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
1774b941ff4SPhilip Reames  %trunc = trunc <4 x i64> %abs to <4 x i32>
1784b941ff4SPhilip Reames  ret <4 x i32> %trunc
1794b941ff4SPhilip Reames}
1804b941ff4SPhilip Reames
; Promoted-operand form: i16 inputs, i32 result; abd at e16 then vzext.vf2.
1814b941ff4SPhilip Reamesdefine <4 x i32> @sabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
1824b941ff4SPhilip Reames;
1834b941ff4SPhilip Reames; CHECK-LABEL: sabd_4s_promoted_ops:
1844b941ff4SPhilip Reames; CHECK:       # %bb.0:
1854b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
186*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
187*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
188*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v9, v8, v10
1894b941ff4SPhilip Reames; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
190*a6b870dbSPhilip Reames; CHECK-NEXT:    vzext.vf2 v8, v9
1914b941ff4SPhilip Reames; CHECK-NEXT:    ret
1924b941ff4SPhilip Reames  %a.sext = sext <4 x i16> %a to <4 x i32>
1934b941ff4SPhilip Reames  %b.sext = sext <4 x i16> %b to <4 x i32>
1944b941ff4SPhilip Reames  %sub = sub <4 x i32> %a.sext, %b.sext
1954b941ff4SPhilip Reames  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
1964b941ff4SPhilip Reames  ret <4 x i32> %abs
1974b941ff4SPhilip Reames}
1984b941ff4SPhilip Reames
; Signed abd on 2 x i64 widened through the illegal i128 type; the widening must
; still be eliminated, leaving vmin/vmax/vsub at e64.
1994b941ff4SPhilip Reamesdefine <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) {
200*a6b870dbSPhilip Reames; CHECK-LABEL: sabd_2d:
201*a6b870dbSPhilip Reames; CHECK:       # %bb.0:
202*a6b870dbSPhilip Reames; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
203*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
204*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
205*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
206*a6b870dbSPhilip Reames; CHECK-NEXT:    ret
2074b941ff4SPhilip Reames  %a.sext = sext <2 x i64> %a to <2 x i128>
2084b941ff4SPhilip Reames  %b.sext = sext <2 x i64> %b to <2 x i128>
2094b941ff4SPhilip Reames  %sub = sub <2 x i128> %a.sext, %b.sext
2104b941ff4SPhilip Reames  %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
2114b941ff4SPhilip Reames  %trunc = trunc <2 x i128> %abs to <2 x i64>
2124b941ff4SPhilip Reames  ret <2 x i64> %trunc
2134b941ff4SPhilip Reames}
2144b941ff4SPhilip Reames
; Promoted-operand form: i32 inputs, i64 result; abd at e32 then vzext.vf2.
2154b941ff4SPhilip Reamesdefine <2 x i64> @sabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
2164b941ff4SPhilip Reames;
2174b941ff4SPhilip Reames; CHECK-LABEL: sabd_2d_promoted_ops:
2184b941ff4SPhilip Reames; CHECK:       # %bb.0:
2194b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
220*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
221*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
222*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v9, v8, v10
2234b941ff4SPhilip Reames; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
224*a6b870dbSPhilip Reames; CHECK-NEXT:    vzext.vf2 v8, v9
2254b941ff4SPhilip Reames; CHECK-NEXT:    ret
2264b941ff4SPhilip Reames  %a.sext = sext <2 x i32> %a to <2 x i64>
2274b941ff4SPhilip Reames  %b.sext = sext <2 x i32> %b to <2 x i64>
2284b941ff4SPhilip Reames  %sub = sub <2 x i64> %a.sext, %b.sext
2294b941ff4SPhilip Reames  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
2304b941ff4SPhilip Reames  ret <2 x i64> %abs
2314b941ff4SPhilip Reames}
2324b941ff4SPhilip Reames
2334b941ff4SPhilip Reames;
2344b941ff4SPhilip Reames; UABD
2354b941ff4SPhilip Reames;
2364b941ff4SPhilip Reames
; Unsigned abd: zext+sub+abs+trunc narrows to e8 using the unsigned
; vminu/vmaxu pair instead of vmin/vmax.
2374b941ff4SPhilip Reamesdefine <8 x i8> @uabd_8b(<8 x i8> %a, <8 x i8> %b) {
2384b941ff4SPhilip Reames;
2394b941ff4SPhilip Reames; CHECK-LABEL: uabd_8b:
2404b941ff4SPhilip Reames; CHECK:       # %bb.0:
2414b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
242*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
243*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
244*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
2454b941ff4SPhilip Reames; CHECK-NEXT:    ret
2464b941ff4SPhilip Reames  %a.zext = zext <8 x i8> %a to <8 x i16>
2474b941ff4SPhilip Reames  %b.zext = zext <8 x i8> %b to <8 x i16>
2484b941ff4SPhilip Reames  %sub = sub <8 x i16> %a.zext, %b.zext
2494b941ff4SPhilip Reames  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
2504b941ff4SPhilip Reames  %trunc = trunc <8 x i16> %abs to <8 x i8>
2514b941ff4SPhilip Reames  ret <8 x i8> %trunc
2524b941ff4SPhilip Reames}
2534b941ff4SPhilip Reames
; Unsigned abd on 16 x i8 widened through i16; vminu/vmaxu/vsub at e8/m1.
2544b941ff4SPhilip Reamesdefine <16 x i8> @uabd_16b(<16 x i8> %a, <16 x i8> %b) {
2554b941ff4SPhilip Reames;
2564b941ff4SPhilip Reames; CHECK-LABEL: uabd_16b:
2574b941ff4SPhilip Reames; CHECK:       # %bb.0:
2584b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
259*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
260*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
261*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
2624b941ff4SPhilip Reames; CHECK-NEXT:    ret
2634b941ff4SPhilip Reames  %a.zext = zext <16 x i8> %a to <16 x i16>
2644b941ff4SPhilip Reames  %b.zext = zext <16 x i8> %b to <16 x i16>
2654b941ff4SPhilip Reames  %sub = sub <16 x i16> %a.zext, %b.zext
2664b941ff4SPhilip Reames  %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
2674b941ff4SPhilip Reames  %trunc = trunc <16 x i16> %abs to <16 x i8>
2684b941ff4SPhilip Reames  ret <16 x i8> %trunc
2694b941ff4SPhilip Reames}
2704b941ff4SPhilip Reames
; Unsigned abd on 4 x i16 widened through i32; vminu/vmaxu/vsub at e16.
2714b941ff4SPhilip Reamesdefine <4 x i16> @uabd_4h(<4 x i16> %a, <4 x i16> %b) {
2724b941ff4SPhilip Reames;
2734b941ff4SPhilip Reames; CHECK-LABEL: uabd_4h:
2744b941ff4SPhilip Reames; CHECK:       # %bb.0:
2754b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
276*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
277*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
278*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
2794b941ff4SPhilip Reames; CHECK-NEXT:    ret
2804b941ff4SPhilip Reames  %a.zext = zext <4 x i16> %a to <4 x i32>
2814b941ff4SPhilip Reames  %b.zext = zext <4 x i16> %b to <4 x i32>
2824b941ff4SPhilip Reames  %sub = sub <4 x i32> %a.zext, %b.zext
2834b941ff4SPhilip Reames  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
2844b941ff4SPhilip Reames  %trunc = trunc <4 x i32> %abs to <4 x i16>
2854b941ff4SPhilip Reames  ret <4 x i16> %trunc
2864b941ff4SPhilip Reames}
2874b941ff4SPhilip Reames
; Promoted-operand unsigned abd: i8 inputs, i16 result; abdu at e8 then vzext.vf2.
2884b941ff4SPhilip Reamesdefine <4 x i16> @uabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
2894b941ff4SPhilip Reames;
2904b941ff4SPhilip Reames; CHECK-LABEL: uabd_4h_promoted_ops:
2914b941ff4SPhilip Reames; CHECK:       # %bb.0:
2924b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
293*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
294*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
295*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v9, v8, v10
2964b941ff4SPhilip Reames; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
297*a6b870dbSPhilip Reames; CHECK-NEXT:    vzext.vf2 v8, v9
2984b941ff4SPhilip Reames; CHECK-NEXT:    ret
2994b941ff4SPhilip Reames  %a.zext = zext <4 x i8> %a to <4 x i16>
3004b941ff4SPhilip Reames  %b.zext = zext <4 x i8> %b to <4 x i16>
3014b941ff4SPhilip Reames  %sub = sub <4 x i16> %a.zext, %b.zext
3024b941ff4SPhilip Reames  %abs = call <4 x i16> @llvm.abs.v4i16(<4 x i16> %sub, i1 true)
3034b941ff4SPhilip Reames  ret <4 x i16> %abs
3044b941ff4SPhilip Reames}
3054b941ff4SPhilip Reames
; Unsigned abd on 8 x i16 widened through i32; vminu/vmaxu/vsub at e16/m1.
3064b941ff4SPhilip Reamesdefine <8 x i16> @uabd_8h(<8 x i16> %a, <8 x i16> %b) {
3074b941ff4SPhilip Reames;
3084b941ff4SPhilip Reames; CHECK-LABEL: uabd_8h:
3094b941ff4SPhilip Reames; CHECK:       # %bb.0:
3104b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
311*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
312*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
313*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
3144b941ff4SPhilip Reames; CHECK-NEXT:    ret
3154b941ff4SPhilip Reames  %a.zext = zext <8 x i16> %a to <8 x i32>
3164b941ff4SPhilip Reames  %b.zext = zext <8 x i16> %b to <8 x i32>
3174b941ff4SPhilip Reames  %sub = sub <8 x i32> %a.zext, %b.zext
3184b941ff4SPhilip Reames  %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
3194b941ff4SPhilip Reames  %trunc = trunc <8 x i32> %abs to <8 x i16>
3204b941ff4SPhilip Reames  ret <8 x i16> %trunc
3214b941ff4SPhilip Reames}
3224b941ff4SPhilip Reames
; Promoted-operand unsigned abd: i8 inputs, i16 result; abdu at e8 then vzext.vf2.
3234b941ff4SPhilip Reamesdefine <8 x i16> @uabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
3244b941ff4SPhilip Reames;
3254b941ff4SPhilip Reames; CHECK-LABEL: uabd_8h_promoted_ops:
3264b941ff4SPhilip Reames; CHECK:       # %bb.0:
3274b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
328*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
329*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
330*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v9, v8, v10
3314b941ff4SPhilip Reames; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
332*a6b870dbSPhilip Reames; CHECK-NEXT:    vzext.vf2 v8, v9
3334b941ff4SPhilip Reames; CHECK-NEXT:    ret
3344b941ff4SPhilip Reames  %a.zext = zext <8 x i8> %a to <8 x i16>
3354b941ff4SPhilip Reames  %b.zext = zext <8 x i8> %b to <8 x i16>
3364b941ff4SPhilip Reames  %sub = sub <8 x i16> %a.zext, %b.zext
3374b941ff4SPhilip Reames  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
3384b941ff4SPhilip Reames  ret <8 x i16> %abs
3394b941ff4SPhilip Reames}
3404b941ff4SPhilip Reames
; Unsigned abd on 2 x i32 widened through i64; vminu/vmaxu/vsub at e32.
3414b941ff4SPhilip Reamesdefine <2 x i32> @uabd_2s(<2 x i32> %a, <2 x i32> %b) {
3424b941ff4SPhilip Reames;
3434b941ff4SPhilip Reames; CHECK-LABEL: uabd_2s:
3444b941ff4SPhilip Reames; CHECK:       # %bb.0:
3454b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
346*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
347*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
348*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
3494b941ff4SPhilip Reames; CHECK-NEXT:    ret
3504b941ff4SPhilip Reames  %a.zext = zext <2 x i32> %a to <2 x i64>
3514b941ff4SPhilip Reames  %b.zext = zext <2 x i32> %b to <2 x i64>
3524b941ff4SPhilip Reames  %sub = sub <2 x i64> %a.zext, %b.zext
3534b941ff4SPhilip Reames  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
3544b941ff4SPhilip Reames  %trunc = trunc <2 x i64> %abs to <2 x i32>
3554b941ff4SPhilip Reames  ret <2 x i32> %trunc
3564b941ff4SPhilip Reames}
3574b941ff4SPhilip Reames
; Promoted-operand unsigned abd: i16 inputs, i32 result; abdu at e16 then vzext.vf2.
3584b941ff4SPhilip Reamesdefine <2 x i32> @uabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
3594b941ff4SPhilip Reames;
3604b941ff4SPhilip Reames; CHECK-LABEL: uabd_2s_promoted_ops:
3614b941ff4SPhilip Reames; CHECK:       # %bb.0:
3624b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
363*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
364*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
365*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v9, v8, v10
3664b941ff4SPhilip Reames; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
367*a6b870dbSPhilip Reames; CHECK-NEXT:    vzext.vf2 v8, v9
3684b941ff4SPhilip Reames; CHECK-NEXT:    ret
3694b941ff4SPhilip Reames  %a.zext = zext <2 x i16> %a to <2 x i32>
3704b941ff4SPhilip Reames  %b.zext = zext <2 x i16> %b to <2 x i32>
3714b941ff4SPhilip Reames  %sub = sub <2 x i32> %a.zext, %b.zext
3724b941ff4SPhilip Reames  %abs = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %sub, i1 true)
3734b941ff4SPhilip Reames  ret <2 x i32> %abs
3744b941ff4SPhilip Reames}
3754b941ff4SPhilip Reames
; Unsigned abd on 4 x i32 widened through i64; vminu/vmaxu/vsub at e32/m1.
3764b941ff4SPhilip Reamesdefine <4 x i32> @uabd_4s(<4 x i32> %a, <4 x i32> %b) {
3774b941ff4SPhilip Reames;
3784b941ff4SPhilip Reames; CHECK-LABEL: uabd_4s:
3794b941ff4SPhilip Reames; CHECK:       # %bb.0:
3804b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
381*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
382*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
383*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
3844b941ff4SPhilip Reames; CHECK-NEXT:    ret
3854b941ff4SPhilip Reames  %a.zext = zext <4 x i32> %a to <4 x i64>
3864b941ff4SPhilip Reames  %b.zext = zext <4 x i32> %b to <4 x i64>
3874b941ff4SPhilip Reames  %sub = sub <4 x i64> %a.zext, %b.zext
3884b941ff4SPhilip Reames  %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
3894b941ff4SPhilip Reames  %trunc = trunc <4 x i64> %abs to <4 x i32>
3904b941ff4SPhilip Reames  ret <4 x i32> %trunc
3914b941ff4SPhilip Reames}
3924b941ff4SPhilip Reames
; Promoted-operand unsigned abd: i16 inputs, i32 result; abdu at e16 then vzext.vf2.
3934b941ff4SPhilip Reamesdefine <4 x i32> @uabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
3944b941ff4SPhilip Reames;
3954b941ff4SPhilip Reames; CHECK-LABEL: uabd_4s_promoted_ops:
3964b941ff4SPhilip Reames; CHECK:       # %bb.0:
3974b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
398*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
399*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
400*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v9, v8, v10
4014b941ff4SPhilip Reames; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
402*a6b870dbSPhilip Reames; CHECK-NEXT:    vzext.vf2 v8, v9
4034b941ff4SPhilip Reames; CHECK-NEXT:    ret
4044b941ff4SPhilip Reames  %a.zext = zext <4 x i16> %a to <4 x i32>
4054b941ff4SPhilip Reames  %b.zext = zext <4 x i16> %b to <4 x i32>
4064b941ff4SPhilip Reames  %sub = sub <4 x i32> %a.zext, %b.zext
4074b941ff4SPhilip Reames  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
4084b941ff4SPhilip Reames  ret <4 x i32> %abs
4094b941ff4SPhilip Reames}
4104b941ff4SPhilip Reames
; Unsigned abd on 2 x i64 widened through the illegal i128 type; widening must
; be eliminated, leaving vminu/vmaxu/vsub at e64.
4114b941ff4SPhilip Reamesdefine <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) {
412*a6b870dbSPhilip Reames; CHECK-LABEL: uabd_2d:
413*a6b870dbSPhilip Reames; CHECK:       # %bb.0:
414*a6b870dbSPhilip Reames; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
415*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
416*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
417*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
418*a6b870dbSPhilip Reames; CHECK-NEXT:    ret
4194b941ff4SPhilip Reames  %a.zext = zext <2 x i64> %a to <2 x i128>
4204b941ff4SPhilip Reames  %b.zext = zext <2 x i64> %b to <2 x i128>
4214b941ff4SPhilip Reames  %sub = sub <2 x i128> %a.zext, %b.zext
4224b941ff4SPhilip Reames  %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
4234b941ff4SPhilip Reames  %trunc = trunc <2 x i128> %abs to <2 x i64>
4244b941ff4SPhilip Reames  ret <2 x i64> %trunc
4254b941ff4SPhilip Reames}
4264b941ff4SPhilip Reames
; Promoted-operand unsigned abd: i32 inputs, i64 result; abdu at e32 then vzext.vf2.
4274b941ff4SPhilip Reamesdefine <2 x i64> @uabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
4284b941ff4SPhilip Reames;
4294b941ff4SPhilip Reames; CHECK-LABEL: uabd_2d_promoted_ops:
4304b941ff4SPhilip Reames; CHECK:       # %bb.0:
4314b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
432*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
433*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
434*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v9, v8, v10
4354b941ff4SPhilip Reames; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
436*a6b870dbSPhilip Reames; CHECK-NEXT:    vzext.vf2 v8, v9
4374b941ff4SPhilip Reames; CHECK-NEXT:    ret
4384b941ff4SPhilip Reames  %a.zext = zext <2 x i32> %a to <2 x i64>
4394b941ff4SPhilip Reames  %b.zext = zext <2 x i32> %b to <2 x i64>
4404b941ff4SPhilip Reames  %sub = sub <2 x i64> %a.zext, %b.zext
4414b941ff4SPhilip Reames  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
4424b941ff4SPhilip Reames  ret <2 x i64> %abs
4434b941ff4SPhilip Reames}
4444b941ff4SPhilip Reames
; abs of a nuw sub: currently lowered as sub + negate (vrsub.vi) + vmax rather
; than a min/max form (records current output, not necessarily optimal).
4454b941ff4SPhilip Reamesdefine <16 x i8> @uabd_v16i8_nuw(<16 x i8> %a, <16 x i8> %b) {
4464b941ff4SPhilip Reames;
4474b941ff4SPhilip Reames; CHECK-LABEL: uabd_v16i8_nuw:
4484b941ff4SPhilip Reames; CHECK:       # %bb.0:
4494b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
4504b941ff4SPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v9
4514b941ff4SPhilip Reames; CHECK-NEXT:    vrsub.vi v9, v8, 0
4524b941ff4SPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
4534b941ff4SPhilip Reames; CHECK-NEXT:    ret
4544b941ff4SPhilip Reames  %sub = sub nuw <16 x i8> %a, %b
4554b941ff4SPhilip Reames  %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
4564b941ff4SPhilip Reames  ret <16 x i8> %abs
4574b941ff4SPhilip Reames}
4584b941ff4SPhilip Reames
; abs of a nuw sub, i16 elements: sub + vrsub.vi + vmax (current output).
4594b941ff4SPhilip Reamesdefine <8 x i16> @uabd_v8i16_nuw(<8 x i16> %a, <8 x i16> %b) {
4604b941ff4SPhilip Reames;
4614b941ff4SPhilip Reames; CHECK-LABEL: uabd_v8i16_nuw:
4624b941ff4SPhilip Reames; CHECK:       # %bb.0:
4634b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
4644b941ff4SPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v9
4654b941ff4SPhilip Reames; CHECK-NEXT:    vrsub.vi v9, v8, 0
4664b941ff4SPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
4674b941ff4SPhilip Reames; CHECK-NEXT:    ret
4684b941ff4SPhilip Reames  %sub = sub nuw <8 x i16> %a, %b
4694b941ff4SPhilip Reames  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
4704b941ff4SPhilip Reames  ret <8 x i16> %abs
4714b941ff4SPhilip Reames}
4724b941ff4SPhilip Reames
; abs of a nuw sub, i32 elements: sub + vrsub.vi + vmax (current output).
4734b941ff4SPhilip Reamesdefine <4 x i32> @uabd_v4i32_nuw(<4 x i32> %a, <4 x i32> %b) {
4744b941ff4SPhilip Reames;
4754b941ff4SPhilip Reames; CHECK-LABEL: uabd_v4i32_nuw:
4764b941ff4SPhilip Reames; CHECK:       # %bb.0:
4774b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
4784b941ff4SPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v9
4794b941ff4SPhilip Reames; CHECK-NEXT:    vrsub.vi v9, v8, 0
4804b941ff4SPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
4814b941ff4SPhilip Reames; CHECK-NEXT:    ret
4824b941ff4SPhilip Reames  %sub = sub nuw <4 x i32> %a, %b
4834b941ff4SPhilip Reames  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
4844b941ff4SPhilip Reames  ret <4 x i32> %abs
4854b941ff4SPhilip Reames}
4864b941ff4SPhilip Reames
; abs of a nuw sub, i64 elements: sub + vrsub.vi + vmax (current output).
4874b941ff4SPhilip Reamesdefine <2 x i64> @uabd_v2i64_nuw(<2 x i64> %a, <2 x i64> %b) {
4884b941ff4SPhilip Reames;
4894b941ff4SPhilip Reames; CHECK-LABEL: uabd_v2i64_nuw:
4904b941ff4SPhilip Reames; CHECK:       # %bb.0:
4914b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
4924b941ff4SPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v9
4934b941ff4SPhilip Reames; CHECK-NEXT:    vrsub.vi v9, v8, 0
4944b941ff4SPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
4954b941ff4SPhilip Reames; CHECK-NEXT:    ret
4964b941ff4SPhilip Reames  %sub = sub nuw <2 x i64> %a, %b
4974b941ff4SPhilip Reames  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
4984b941ff4SPhilip Reames  ret <2 x i64> %abs
4994b941ff4SPhilip Reames}
5004b941ff4SPhilip Reames
; abs of an nsw sub is recognized as signed abd: vmin/vmax/vsub, no explicit sub+abs.
5014b941ff4SPhilip Reamesdefine <16 x i8> @sabd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) {
5024b941ff4SPhilip Reames;
5034b941ff4SPhilip Reames; CHECK-LABEL: sabd_v16i8_nsw:
5044b941ff4SPhilip Reames; CHECK:       # %bb.0:
5054b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
506*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
5074b941ff4SPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
508*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
5094b941ff4SPhilip Reames; CHECK-NEXT:    ret
5104b941ff4SPhilip Reames  %sub = sub nsw <16 x i8> %a, %b
5114b941ff4SPhilip Reames  %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
5124b941ff4SPhilip Reames  ret <16 x i8> %abs
5134b941ff4SPhilip Reames}
5144b941ff4SPhilip Reames
; abs of an nsw sub, i16 elements: recognized as signed abd (vmin/vmax/vsub).
5154b941ff4SPhilip Reamesdefine <8 x i16> @sabd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) {
5164b941ff4SPhilip Reames;
5174b941ff4SPhilip Reames; CHECK-LABEL: sabd_v8i16_nsw:
5184b941ff4SPhilip Reames; CHECK:       # %bb.0:
5194b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
520*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
5214b941ff4SPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
522*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
5234b941ff4SPhilip Reames; CHECK-NEXT:    ret
5244b941ff4SPhilip Reames  %sub = sub nsw <8 x i16> %a, %b
5254b941ff4SPhilip Reames  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
5264b941ff4SPhilip Reames  ret <8 x i16> %abs
5274b941ff4SPhilip Reames}
5284b941ff4SPhilip Reames
; abs of an nsw sub, i32 elements: recognized as signed abd (vmin/vmax/vsub).
5294b941ff4SPhilip Reamesdefine <4 x i32> @sabd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) {
5304b941ff4SPhilip Reames;
5314b941ff4SPhilip Reames; CHECK-LABEL: sabd_v4i32_nsw:
5324b941ff4SPhilip Reames; CHECK:       # %bb.0:
5334b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
534*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
5354b941ff4SPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
536*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
5374b941ff4SPhilip Reames; CHECK-NEXT:    ret
5384b941ff4SPhilip Reames  %sub = sub nsw <4 x i32> %a, %b
5394b941ff4SPhilip Reames  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
5404b941ff4SPhilip Reames  ret <4 x i32> %abs
5414b941ff4SPhilip Reames}
5424b941ff4SPhilip Reames
; abs of an nsw sub, i64 elements: recognized as signed abd (vmin/vmax/vsub).
5434b941ff4SPhilip Reamesdefine <2 x i64> @sabd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) {
5444b941ff4SPhilip Reames;
5454b941ff4SPhilip Reames; CHECK-LABEL: sabd_v2i64_nsw:
5464b941ff4SPhilip Reames; CHECK:       # %bb.0:
5474b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
548*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
5494b941ff4SPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
550*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
5514b941ff4SPhilip Reames; CHECK-NEXT:    ret
5524b941ff4SPhilip Reames  %sub = sub nsw <2 x i64> %a, %b
5534b941ff4SPhilip Reames  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
5544b941ff4SPhilip Reames  ret <2 x i64> %abs
5554b941ff4SPhilip Reames}
5564b941ff4SPhilip Reames
; Explicit smax(x,y) - smin(x,y) form of signed ABD on <16 x i8>; the checks
; pin a direct vmin.vv / vmax.vv / vsub.vv sequence at e8, VL=16.
5574b941ff4SPhilip Reamesdefine <16 x i8> @smaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
5584b941ff4SPhilip Reames;
5594b941ff4SPhilip Reames; CHECK-LABEL: smaxmin_v16i8:
5604b941ff4SPhilip Reames; CHECK:       # %bb.0:
5614b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
562*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
563*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
564*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
5654b941ff4SPhilip Reames; CHECK-NEXT:    ret
5664b941ff4SPhilip Reames  %a = tail call <16 x i8> @llvm.smax.v16i8(<16 x i8> %0, <16 x i8> %1)
5674b941ff4SPhilip Reames  %b = tail call <16 x i8> @llvm.smin.v16i8(<16 x i8> %0, <16 x i8> %1)
5684b941ff4SPhilip Reames  %sub = sub <16 x i8> %a, %b
5694b941ff4SPhilip Reames  ret <16 x i8> %sub
5704b941ff4SPhilip Reames}
5714b941ff4SPhilip Reames
; smax - smin ABD pattern, <8 x i16> variant (e16, VL=8).
5724b941ff4SPhilip Reamesdefine <8 x i16> @smaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
5734b941ff4SPhilip Reames;
5744b941ff4SPhilip Reames; CHECK-LABEL: smaxmin_v8i16:
5754b941ff4SPhilip Reames; CHECK:       # %bb.0:
5764b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
577*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
578*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
579*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
5804b941ff4SPhilip Reames; CHECK-NEXT:    ret
5814b941ff4SPhilip Reames  %a = tail call <8 x i16> @llvm.smax.v8i16(<8 x i16> %0, <8 x i16> %1)
5824b941ff4SPhilip Reames  %b = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> %0, <8 x i16> %1)
5834b941ff4SPhilip Reames  %sub = sub <8 x i16> %a, %b
5844b941ff4SPhilip Reames  ret <8 x i16> %sub
5854b941ff4SPhilip Reames}
5864b941ff4SPhilip Reames
; smax - smin ABD pattern, <4 x i32> variant (e32, VL=4).
5874b941ff4SPhilip Reamesdefine <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
5884b941ff4SPhilip Reames;
5894b941ff4SPhilip Reames; CHECK-LABEL: smaxmin_v4i32:
5904b941ff4SPhilip Reames; CHECK:       # %bb.0:
5914b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
592*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
593*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
594*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
5954b941ff4SPhilip Reames; CHECK-NEXT:    ret
5964b941ff4SPhilip Reames  %a = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %0, <4 x i32> %1)
5974b941ff4SPhilip Reames  %b = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %0, <4 x i32> %1)
5984b941ff4SPhilip Reames  %sub = sub <4 x i32> %a, %b
5994b941ff4SPhilip Reames  ret <4 x i32> %sub
6004b941ff4SPhilip Reames}
6014b941ff4SPhilip Reames
; smax - smin ABD pattern, <2 x i64> variant (e64, VL=2).
6024b941ff4SPhilip Reamesdefine <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
6034b941ff4SPhilip Reames;
6044b941ff4SPhilip Reames; CHECK-LABEL: smaxmin_v2i64:
6054b941ff4SPhilip Reames; CHECK:       # %bb.0:
6064b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
607*a6b870dbSPhilip Reames; CHECK-NEXT:    vmin.vv v10, v8, v9
608*a6b870dbSPhilip Reames; CHECK-NEXT:    vmax.vv v8, v8, v9
609*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
6104b941ff4SPhilip Reames; CHECK-NEXT:    ret
6114b941ff4SPhilip Reames  %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
6124b941ff4SPhilip Reames  %b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1)
6134b941ff4SPhilip Reames  %sub = sub <2 x i64> %a, %b
6144b941ff4SPhilip Reames  ret <2 x i64> %sub
6154b941ff4SPhilip Reames}
6164b941ff4SPhilip Reames
; Unsigned ABD as umax(x,y) - umin(x,y) on <16 x i8>; checks pin the unsigned
; instruction forms vminu.vv / vmaxu.vv followed by vsub.vv (e8, VL=16).
6174b941ff4SPhilip Reamesdefine <16 x i8> @umaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
6184b941ff4SPhilip Reames;
6194b941ff4SPhilip Reames; CHECK-LABEL: umaxmin_v16i8:
6204b941ff4SPhilip Reames; CHECK:       # %bb.0:
6214b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
622*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
623*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
624*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
6254b941ff4SPhilip Reames; CHECK-NEXT:    ret
6264b941ff4SPhilip Reames  %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
6274b941ff4SPhilip Reames  %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %0, <16 x i8> %1)
6284b941ff4SPhilip Reames  %sub = sub <16 x i8> %a, %b
6294b941ff4SPhilip Reames  ret <16 x i8> %sub
6304b941ff4SPhilip Reames}
6314b941ff4SPhilip Reames
; umax - umin ABD pattern, <8 x i16> variant (e16, VL=8).
6324b941ff4SPhilip Reamesdefine <8 x i16> @umaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
6334b941ff4SPhilip Reames;
6344b941ff4SPhilip Reames; CHECK-LABEL: umaxmin_v8i16:
6354b941ff4SPhilip Reames; CHECK:       # %bb.0:
6364b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
637*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
638*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
639*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
6404b941ff4SPhilip Reames; CHECK-NEXT:    ret
6414b941ff4SPhilip Reames  %a = tail call <8 x i16> @llvm.umax.v8i16(<8 x i16> %0, <8 x i16> %1)
6424b941ff4SPhilip Reames  %b = tail call <8 x i16> @llvm.umin.v8i16(<8 x i16> %0, <8 x i16> %1)
6434b941ff4SPhilip Reames  %sub = sub <8 x i16> %a, %b
6444b941ff4SPhilip Reames  ret <8 x i16> %sub
6454b941ff4SPhilip Reames}
6464b941ff4SPhilip Reames
; umax - umin ABD pattern, <4 x i32> variant (e32, VL=4).
6474b941ff4SPhilip Reamesdefine <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
6484b941ff4SPhilip Reames;
6494b941ff4SPhilip Reames; CHECK-LABEL: umaxmin_v4i32:
6504b941ff4SPhilip Reames; CHECK:       # %bb.0:
6514b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
652*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
653*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
654*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
6554b941ff4SPhilip Reames; CHECK-NEXT:    ret
6564b941ff4SPhilip Reames  %a = tail call <4 x i32> @llvm.umax.v4i32(<4 x i32> %0, <4 x i32> %1)
6574b941ff4SPhilip Reames  %b = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %0, <4 x i32> %1)
6584b941ff4SPhilip Reames  %sub = sub <4 x i32> %a, %b
6594b941ff4SPhilip Reames  ret <4 x i32> %sub
6604b941ff4SPhilip Reames}
6614b941ff4SPhilip Reames
; umax - umin ABD pattern, <2 x i64> variant (e64, VL=2).
6624b941ff4SPhilip Reamesdefine <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
6634b941ff4SPhilip Reames;
6644b941ff4SPhilip Reames; CHECK-LABEL: umaxmin_v2i64:
6654b941ff4SPhilip Reames; CHECK:       # %bb.0:
6664b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
667*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
668*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
669*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
6704b941ff4SPhilip Reames; CHECK-NEXT:    ret
6714b941ff4SPhilip Reames  %a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1)
6724b941ff4SPhilip Reames  %b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1)
6734b941ff4SPhilip Reames  %sub = sub <2 x i64> %a, %b
6744b941ff4SPhilip Reames  ret <2 x i64> %sub
6754b941ff4SPhilip Reames}
6764b941ff4SPhilip Reames
; Commuted variant of umaxmin_v16i8: the umin call swaps its operands
; (%1, %0) relative to the umax call (%0, %1). min/max are commutative, so the
; checked output is the same vminu/vmaxu/vsub sequence with uncommuted operands.
6774b941ff4SPhilip Reamesdefine <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) {
6784b941ff4SPhilip Reames;
6794b941ff4SPhilip Reames; CHECK-LABEL: umaxmin_v16i8_com1:
6804b941ff4SPhilip Reames; CHECK:       # %bb.0:
6814b941ff4SPhilip Reames; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
682*a6b870dbSPhilip Reames; CHECK-NEXT:    vminu.vv v10, v8, v9
683*a6b870dbSPhilip Reames; CHECK-NEXT:    vmaxu.vv v8, v8, v9
684*a6b870dbSPhilip Reames; CHECK-NEXT:    vsub.vv v8, v8, v10
6854b941ff4SPhilip Reames; CHECK-NEXT:    ret
6864b941ff4SPhilip Reames  %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
6874b941ff4SPhilip Reames  %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %1, <16 x i8> %0)
6884b941ff4SPhilip Reames  %sub = sub <16 x i8> %a, %b
6894b941ff4SPhilip Reames  ret <16 x i8> %sub
6904b941ff4SPhilip Reames}
6914b941ff4SPhilip Reames
6924b941ff4SPhilip Reamesdeclare <8 x i8> @llvm.abs.v8i8(<8 x i8>, i1)
6934b941ff4SPhilip Reamesdeclare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)
6944b941ff4SPhilip Reames
6954b941ff4SPhilip Reamesdeclare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1)
6964b941ff4SPhilip Reamesdeclare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)
6974b941ff4SPhilip Reamesdeclare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
6984b941ff4SPhilip Reames
6994b941ff4SPhilip Reamesdeclare <2 x i32> @llvm.abs.v2i32(<2 x i32>, i1)
7004b941ff4SPhilip Reamesdeclare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)
7014b941ff4SPhilip Reamesdeclare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
7024b941ff4SPhilip Reames
7034b941ff4SPhilip Reamesdeclare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
7044b941ff4SPhilip Reamesdeclare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
7054b941ff4SPhilip Reames
7064b941ff4SPhilip Reamesdeclare <2 x i128> @llvm.abs.v2i128(<2 x i128>, i1)
7074b941ff4SPhilip Reames
7084b941ff4SPhilip Reamesdeclare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>)
7094b941ff4SPhilip Reamesdeclare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)
7104b941ff4SPhilip Reamesdeclare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
7114b941ff4SPhilip Reamesdeclare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
7124b941ff4SPhilip Reamesdeclare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>)
7134b941ff4SPhilip Reamesdeclare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)
7144b941ff4SPhilip Reamesdeclare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
7154b941ff4SPhilip Reamesdeclare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
7164b941ff4SPhilip Reamesdeclare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>)
7174b941ff4SPhilip Reamesdeclare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>)
7184b941ff4SPhilip Reamesdeclare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)
7194b941ff4SPhilip Reamesdeclare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>)
7204b941ff4SPhilip Reamesdeclare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)
7214b941ff4SPhilip Reamesdeclare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>)
7224b941ff4SPhilip Reamesdeclare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
7234b941ff4SPhilip Reamesdeclare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
7244b941ff4SPhilip Reames
725*a6b870dbSPhilip Reames;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
726*a6b870dbSPhilip Reames; RV32: {{.*}}
727*a6b870dbSPhilip Reames; RV64: {{.*}}
728