; NOTE(review): This chunk is a git-blame-annotated dump of an autogenerated
; LLVM FileCheck regression test (RISC-V RVV signed/unsigned absolute
; difference, SABD/UABD). Each original line carries a fused blame prefix
; (commit hash + author, e.g. "4b941ff4SPhilip Reames"), and line breaks fall
; mid-token, so the text below is NOT a runnable .ll file as-is -- TODO: strip
; the blame prefixes to recover the test. Per the autogen NOTE on the first
; line, the CHECK lines are produced by utils/update_llc_test_checks.py and
; must be regenerated with that script rather than hand-edited.
14b941ff4SPhilip Reames; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 24b941ff4SPhilip Reames; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 34b941ff4SPhilip Reames; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 44b941ff4SPhilip Reames 54b941ff4SPhilip Reames; 64b941ff4SPhilip Reames; SABD 74b941ff4SPhilip Reames; 84b941ff4SPhilip Reames 94b941ff4SPhilip Reamesdefine <vscale x 16 x i8> @sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) { 104b941ff4SPhilip Reames; CHECK-LABEL: sabd_b: 114b941ff4SPhilip Reames; CHECK: # %bb.0: 124b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma 13a6b870dbSPhilip Reames; CHECK-NEXT: vmin.vv v12, v8, v10 14a6b870dbSPhilip Reames; CHECK-NEXT: vmax.vv v8, v8, v10 15a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v8, v8, v12 164b941ff4SPhilip Reames; CHECK-NEXT: ret 174b941ff4SPhilip Reames %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i16> 184b941ff4SPhilip Reames %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i16> 194b941ff4SPhilip Reames %sub = sub <vscale x 16 x i16> %a.sext, %b.sext 204b941ff4SPhilip Reames %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true) 214b941ff4SPhilip Reames %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8> 224b941ff4SPhilip Reames ret <vscale x 16 x i8> %trunc 234b941ff4SPhilip Reames} 244b941ff4SPhilip Reames 254b941ff4SPhilip Reamesdefine <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) { 264b941ff4SPhilip Reames; CHECK-LABEL: sabd_b_promoted_ops: 274b941ff4SPhilip Reames; CHECK: # %bb.0: 284b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma 29*13d04fa5SSimon Pilgrim; CHECK-NEXT: vmxor.mm v0, v0, v8 30*13d04fa5SSimon Pilgrim; CHECK-NEXT: vmv.v.i v8, 0 31*13d04fa5SSimon Pilgrim; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 
324b941ff4SPhilip Reames; CHECK-NEXT: ret 334b941ff4SPhilip Reames %a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8> 344b941ff4SPhilip Reames %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8> 354b941ff4SPhilip Reames %sub = sub <vscale x 16 x i8> %a.sext, %b.sext 364b941ff4SPhilip Reames %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true) 374b941ff4SPhilip Reames ret <vscale x 16 x i8> %abs 384b941ff4SPhilip Reames} 394b941ff4SPhilip Reames 404b941ff4SPhilip Reamesdefine <vscale x 8 x i16> @sabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) { 414b941ff4SPhilip Reames; CHECK-LABEL: sabd_h: 424b941ff4SPhilip Reames; CHECK: # %bb.0: 434b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma 44a6b870dbSPhilip Reames; CHECK-NEXT: vmin.vv v12, v8, v10 45a6b870dbSPhilip Reames; CHECK-NEXT: vmax.vv v8, v8, v10 46a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v8, v8, v12 474b941ff4SPhilip Reames; CHECK-NEXT: ret 484b941ff4SPhilip Reames %a.sext = sext <vscale x 8 x i16> %a to <vscale x 8 x i32> 494b941ff4SPhilip Reames %b.sext = sext <vscale x 8 x i16> %b to <vscale x 8 x i32> 504b941ff4SPhilip Reames %sub = sub <vscale x 8 x i32> %a.sext, %b.sext 514b941ff4SPhilip Reames %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true) 524b941ff4SPhilip Reames %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16> 534b941ff4SPhilip Reames ret <vscale x 8 x i16> %trunc 544b941ff4SPhilip Reames} 554b941ff4SPhilip Reames 564b941ff4SPhilip Reamesdefine <vscale x 8 x i16> @sabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) { 574b941ff4SPhilip Reames; CHECK-LABEL: sabd_h_promoted_ops: 584b941ff4SPhilip Reames; CHECK: # %bb.0: 594b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma 60a6b870dbSPhilip Reames; CHECK-NEXT: vmin.vv v10, v8, v9 61a6b870dbSPhilip Reames; CHECK-NEXT: vmax.vv v8, v8, v9 62a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v10, v8, v10 634b941ff4SPhilip 
Reames; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma 64a6b870dbSPhilip Reames; CHECK-NEXT: vzext.vf2 v8, v10 654b941ff4SPhilip Reames; CHECK-NEXT: ret 664b941ff4SPhilip Reames %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16> 674b941ff4SPhilip Reames %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16> 684b941ff4SPhilip Reames %sub = sub <vscale x 8 x i16> %a.sext, %b.sext 694b941ff4SPhilip Reames %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true) 704b941ff4SPhilip Reames ret <vscale x 8 x i16> %abs 714b941ff4SPhilip Reames} 724b941ff4SPhilip Reames 734b941ff4SPhilip Reamesdefine <vscale x 4 x i32> @sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) { 744b941ff4SPhilip Reames; CHECK-LABEL: sabd_s: 754b941ff4SPhilip Reames; CHECK: # %bb.0: 764b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 77a6b870dbSPhilip Reames; CHECK-NEXT: vmin.vv v12, v8, v10 78a6b870dbSPhilip Reames; CHECK-NEXT: vmax.vv v8, v8, v10 79a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v8, v8, v12 804b941ff4SPhilip Reames; CHECK-NEXT: ret 814b941ff4SPhilip Reames %a.sext = sext <vscale x 4 x i32> %a to <vscale x 4 x i64> 824b941ff4SPhilip Reames %b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64> 834b941ff4SPhilip Reames %sub = sub <vscale x 4 x i64> %a.sext, %b.sext 844b941ff4SPhilip Reames %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true) 854b941ff4SPhilip Reames %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32> 864b941ff4SPhilip Reames ret <vscale x 4 x i32> %trunc 874b941ff4SPhilip Reames} 884b941ff4SPhilip Reames 894b941ff4SPhilip Reamesdefine <vscale x 4 x i32> @sabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) { 904b941ff4SPhilip Reames; CHECK-LABEL: sabd_s_promoted_ops: 914b941ff4SPhilip Reames; CHECK: # %bb.0: 924b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma 93a6b870dbSPhilip Reames; CHECK-NEXT: vmin.vv v10, v8, v9 94a6b870dbSPhilip 
Reames; CHECK-NEXT: vmax.vv v8, v8, v9 95a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v10, v8, v10 964b941ff4SPhilip Reames; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma 97a6b870dbSPhilip Reames; CHECK-NEXT: vzext.vf2 v8, v10 984b941ff4SPhilip Reames; CHECK-NEXT: ret 994b941ff4SPhilip Reames %a.sext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32> 1004b941ff4SPhilip Reames %b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32> 1014b941ff4SPhilip Reames %sub = sub <vscale x 4 x i32> %a.sext, %b.sext 1024b941ff4SPhilip Reames %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true) 1034b941ff4SPhilip Reames ret <vscale x 4 x i32> %abs 1044b941ff4SPhilip Reames} 1054b941ff4SPhilip Reames 1064b941ff4SPhilip Reames; FIXME: Crashes legalization if enabled 1074b941ff4SPhilip Reames;; define <vscale x 2 x i64> @sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) { 1084b941ff4SPhilip Reames;; %a.sext = sext <vscale x 2 x i64> %a to <vscale x 2 x i128> 1094b941ff4SPhilip Reames;; %b.sext = sext <vscale x 2 x i64> %b to <vscale x 2 x i128> 1104b941ff4SPhilip Reames;; %sub = sub <vscale x 2 x i128> %a.sext, %b.sext 1114b941ff4SPhilip Reames;; %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true) 1124b941ff4SPhilip Reames;; %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64> 1134b941ff4SPhilip Reames;; ret <vscale x 2 x i64> %trunc 1144b941ff4SPhilip Reames;; } 1154b941ff4SPhilip Reames 1164b941ff4SPhilip Reamesdefine <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) { 1174b941ff4SPhilip Reames; CHECK-LABEL: sabd_d_promoted_ops: 1184b941ff4SPhilip Reames; CHECK: # %bb.0: 1194b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma 120a6b870dbSPhilip Reames; CHECK-NEXT: vmin.vv v10, v8, v9 121a6b870dbSPhilip Reames; CHECK-NEXT: vmax.vv v8, v8, v9 122a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v10, v8, v10 1234b941ff4SPhilip Reames; CHECK-NEXT: vsetvli 
zero, zero, e64, m2, ta, ma 124a6b870dbSPhilip Reames; CHECK-NEXT: vzext.vf2 v8, v10 1254b941ff4SPhilip Reames; CHECK-NEXT: ret 1264b941ff4SPhilip Reames %a.sext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64> 1274b941ff4SPhilip Reames %b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64> 1284b941ff4SPhilip Reames %sub = sub <vscale x 2 x i64> %a.sext, %b.sext 1294b941ff4SPhilip Reames %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true) 1304b941ff4SPhilip Reames ret <vscale x 2 x i64> %abs 1314b941ff4SPhilip Reames} 1324b941ff4SPhilip Reames 1334b941ff4SPhilip Reames; 1344b941ff4SPhilip Reames; UABD 1354b941ff4SPhilip Reames; 1364b941ff4SPhilip Reames 1374b941ff4SPhilip Reamesdefine <vscale x 16 x i8> @uabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) { 1384b941ff4SPhilip Reames; CHECK-LABEL: uabd_b: 1394b941ff4SPhilip Reames; CHECK: # %bb.0: 1404b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma 141a6b870dbSPhilip Reames; CHECK-NEXT: vminu.vv v12, v8, v10 142a6b870dbSPhilip Reames; CHECK-NEXT: vmaxu.vv v8, v8, v10 143a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v8, v8, v12 1444b941ff4SPhilip Reames; CHECK-NEXT: ret 1454b941ff4SPhilip Reames %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i16> 1464b941ff4SPhilip Reames %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i16> 1474b941ff4SPhilip Reames %sub = sub <vscale x 16 x i16> %a.zext, %b.zext 1484b941ff4SPhilip Reames %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true) 1494b941ff4SPhilip Reames %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8> 1504b941ff4SPhilip Reames ret <vscale x 16 x i8> %trunc 1514b941ff4SPhilip Reames} 1524b941ff4SPhilip Reames 1534b941ff4SPhilip Reamesdefine <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) { 1544b941ff4SPhilip Reames; CHECK-LABEL: uabd_b_promoted_ops: 1554b941ff4SPhilip Reames; CHECK: # %bb.0: 1564b941ff4SPhilip Reames; 
CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma 157*13d04fa5SSimon Pilgrim; CHECK-NEXT: vmxor.mm v0, v0, v8 158*13d04fa5SSimon Pilgrim; CHECK-NEXT: vmv.v.i v8, 0 159*13d04fa5SSimon Pilgrim; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 1604b941ff4SPhilip Reames; CHECK-NEXT: ret 1614b941ff4SPhilip Reames %a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8> 1624b941ff4SPhilip Reames %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8> 1634b941ff4SPhilip Reames %sub = sub <vscale x 16 x i8> %a.zext, %b.zext 1644b941ff4SPhilip Reames %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true) 1654b941ff4SPhilip Reames ret <vscale x 16 x i8> %abs 1664b941ff4SPhilip Reames} 1674b941ff4SPhilip Reames 1684b941ff4SPhilip Reamesdefine <vscale x 8 x i16> @uabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) { 1694b941ff4SPhilip Reames; CHECK-LABEL: uabd_h: 1704b941ff4SPhilip Reames; CHECK: # %bb.0: 1714b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma 172a6b870dbSPhilip Reames; CHECK-NEXT: vminu.vv v12, v8, v10 173a6b870dbSPhilip Reames; CHECK-NEXT: vmaxu.vv v8, v8, v10 174a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v8, v8, v12 1754b941ff4SPhilip Reames; CHECK-NEXT: ret 1764b941ff4SPhilip Reames %a.zext = zext <vscale x 8 x i16> %a to <vscale x 8 x i32> 1774b941ff4SPhilip Reames %b.zext = zext <vscale x 8 x i16> %b to <vscale x 8 x i32> 1784b941ff4SPhilip Reames %sub = sub <vscale x 8 x i32> %a.zext, %b.zext 1794b941ff4SPhilip Reames %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true) 1804b941ff4SPhilip Reames %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16> 1814b941ff4SPhilip Reames ret <vscale x 8 x i16> %trunc 1824b941ff4SPhilip Reames} 1834b941ff4SPhilip Reames 1844b941ff4SPhilip Reamesdefine <vscale x 8 x i16> @uabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) { 1854b941ff4SPhilip Reames; CHECK-LABEL: uabd_h_promoted_ops: 1864b941ff4SPhilip Reames; CHECK: # %bb.0: 
1874b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma 188a6b870dbSPhilip Reames; CHECK-NEXT: vminu.vv v10, v8, v9 189a6b870dbSPhilip Reames; CHECK-NEXT: vmaxu.vv v8, v8, v9 190a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v10, v8, v10 1914b941ff4SPhilip Reames; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma 192a6b870dbSPhilip Reames; CHECK-NEXT: vzext.vf2 v8, v10 1934b941ff4SPhilip Reames; CHECK-NEXT: ret 1944b941ff4SPhilip Reames %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16> 1954b941ff4SPhilip Reames %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16> 1964b941ff4SPhilip Reames %sub = sub <vscale x 8 x i16> %a.zext, %b.zext 1974b941ff4SPhilip Reames %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true) 1984b941ff4SPhilip Reames ret <vscale x 8 x i16> %abs 1994b941ff4SPhilip Reames} 2004b941ff4SPhilip Reames 2014b941ff4SPhilip Reamesdefine <vscale x 4 x i32> @uabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) { 2024b941ff4SPhilip Reames; CHECK-LABEL: uabd_s: 2034b941ff4SPhilip Reames; CHECK: # %bb.0: 2044b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 205a6b870dbSPhilip Reames; CHECK-NEXT: vminu.vv v12, v8, v10 206a6b870dbSPhilip Reames; CHECK-NEXT: vmaxu.vv v8, v8, v10 207a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v8, v8, v12 2084b941ff4SPhilip Reames; CHECK-NEXT: ret 2094b941ff4SPhilip Reames %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64> 2104b941ff4SPhilip Reames %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64> 2114b941ff4SPhilip Reames %sub = sub <vscale x 4 x i64> %a.zext, %b.zext 2124b941ff4SPhilip Reames %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true) 2134b941ff4SPhilip Reames %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32> 2144b941ff4SPhilip Reames ret <vscale x 4 x i32> %trunc 2154b941ff4SPhilip Reames} 2164b941ff4SPhilip Reames 2174b941ff4SPhilip Reamesdefine <vscale x 4 x i32> 
@uabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) { 2184b941ff4SPhilip Reames; CHECK-LABEL: uabd_s_promoted_ops: 2194b941ff4SPhilip Reames; CHECK: # %bb.0: 2204b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma 221a6b870dbSPhilip Reames; CHECK-NEXT: vminu.vv v10, v8, v9 222a6b870dbSPhilip Reames; CHECK-NEXT: vmaxu.vv v8, v8, v9 223a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v10, v8, v10 2244b941ff4SPhilip Reames; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma 225a6b870dbSPhilip Reames; CHECK-NEXT: vzext.vf2 v8, v10 2264b941ff4SPhilip Reames; CHECK-NEXT: ret 2274b941ff4SPhilip Reames %a.zext = zext <vscale x 4 x i16> %a to <vscale x 4 x i32> 2284b941ff4SPhilip Reames %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32> 2294b941ff4SPhilip Reames %sub = sub <vscale x 4 x i32> %a.zext, %b.zext 2304b941ff4SPhilip Reames %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true) 2314b941ff4SPhilip Reames ret <vscale x 4 x i32> %abs 2324b941ff4SPhilip Reames} 2334b941ff4SPhilip Reames 2344b941ff4SPhilip Reames; FIXME: Crashes legalization if enabled 2354b941ff4SPhilip Reames;; define <vscale x 2 x i64> @uabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) { 2364b941ff4SPhilip Reames;; %a.zext = zext <vscale x 2 x i64> %a to <vscale x 2 x i128> 2374b941ff4SPhilip Reames;; %b.zext = zext <vscale x 2 x i64> %b to <vscale x 2 x i128> 2384b941ff4SPhilip Reames;; %sub = sub <vscale x 2 x i128> %a.zext, %b.zext 2394b941ff4SPhilip Reames;; %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true) 2404b941ff4SPhilip Reames;; %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64> 2414b941ff4SPhilip Reames;; ret <vscale x 2 x i64> %trunc 2424b941ff4SPhilip Reames;; } 2434b941ff4SPhilip Reames 2444b941ff4SPhilip Reamesdefine <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) { 2454b941ff4SPhilip Reames; CHECK-LABEL: uabd_d_promoted_ops: 
2464b941ff4SPhilip Reames; CHECK: # %bb.0: 2474b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma 248a6b870dbSPhilip Reames; CHECK-NEXT: vminu.vv v10, v8, v9 249a6b870dbSPhilip Reames; CHECK-NEXT: vmaxu.vv v8, v8, v9 250a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v10, v8, v10 2514b941ff4SPhilip Reames; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma 252a6b870dbSPhilip Reames; CHECK-NEXT: vzext.vf2 v8, v10 2534b941ff4SPhilip Reames; CHECK-NEXT: ret 2544b941ff4SPhilip Reames %a.zext = zext <vscale x 2 x i32> %a to <vscale x 2 x i64> 2554b941ff4SPhilip Reames %b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64> 2564b941ff4SPhilip Reames %sub = sub <vscale x 2 x i64> %a.zext, %b.zext 2574b941ff4SPhilip Reames %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true) 2584b941ff4SPhilip Reames ret <vscale x 2 x i64> %abs 2594b941ff4SPhilip Reames} 2604b941ff4SPhilip Reames 2614b941ff4SPhilip Reames; Test the situation where isLegal(ISD::ABD, typeof(%a)) returns true but %a and 2624b941ff4SPhilip Reames; %b have differing types. 
2634b941ff4SPhilip Reamesdefine <vscale x 4 x i32> @uabd_non_matching_extension(<vscale x 4 x i32> %a, <vscale x 4 x i8> %b) { 2644b941ff4SPhilip Reames; CHECK-LABEL: uabd_non_matching_extension: 2654b941ff4SPhilip Reames; CHECK: # %bb.0: 2664b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 2674b941ff4SPhilip Reames; CHECK-NEXT: vzext.vf4 v12, v10 268a6b870dbSPhilip Reames; CHECK-NEXT: vminu.vv v10, v8, v12 269a6b870dbSPhilip Reames; CHECK-NEXT: vmaxu.vv v8, v8, v12 270a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v8, v8, v10 2714b941ff4SPhilip Reames; CHECK-NEXT: ret 2724b941ff4SPhilip Reames %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64> 2734b941ff4SPhilip Reames %b.zext = zext <vscale x 4 x i8> %b to <vscale x 4 x i64> 2744b941ff4SPhilip Reames %sub = sub <vscale x 4 x i64> %a.zext, %b.zext 2754b941ff4SPhilip Reames %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true) 2764b941ff4SPhilip Reames %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32> 2774b941ff4SPhilip Reames ret <vscale x 4 x i32> %trunc 2784b941ff4SPhilip Reames} 2794b941ff4SPhilip Reames 2804b941ff4SPhilip Reames; Test the situation where isLegal(ISD::ABD, typeof(%a.zext)) returns true but 2814b941ff4SPhilip Reames; %a and %b have differing types. 
2824b941ff4SPhilip Reamesdefine <vscale x 4 x i32> @uabd_non_matching_promoted_ops(<vscale x 4 x i8> %a, <vscale x 4 x i16> %b) { 2834b941ff4SPhilip Reames; CHECK-LABEL: uabd_non_matching_promoted_ops: 2844b941ff4SPhilip Reames; CHECK: # %bb.0: 2854b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma 2864b941ff4SPhilip Reames; CHECK-NEXT: vzext.vf2 v10, v8 287a6b870dbSPhilip Reames; CHECK-NEXT: vminu.vv v8, v10, v9 288a6b870dbSPhilip Reames; CHECK-NEXT: vmaxu.vv v9, v10, v9 289a6b870dbSPhilip Reames; CHECK-NEXT: vsub.vv v10, v9, v8 2904b941ff4SPhilip Reames; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma 291a6b870dbSPhilip Reames; CHECK-NEXT: vzext.vf2 v8, v10 2924b941ff4SPhilip Reames; CHECK-NEXT: ret 2934b941ff4SPhilip Reames %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32> 2944b941ff4SPhilip Reames %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32> 2954b941ff4SPhilip Reames %sub = sub <vscale x 4 x i32> %a.zext, %b.zext 2964b941ff4SPhilip Reames %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true) 2974b941ff4SPhilip Reames ret <vscale x 4 x i32> %abs 2984b941ff4SPhilip Reames} 2994b941ff4SPhilip Reames 3004b941ff4SPhilip Reames; Test the situation where isLegal(ISD::ABD, typeof(%a)) returns true but %a and 3014b941ff4SPhilip Reames; %b are promoted differently. 
3024b941ff4SPhilip Reamesdefine <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) { 3034b941ff4SPhilip Reames; CHECK-LABEL: uabd_non_matching_promotion: 3044b941ff4SPhilip Reames; CHECK: # %bb.0: 3054b941ff4SPhilip Reames; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 3064b941ff4SPhilip Reames; CHECK-NEXT: vzext.vf4 v10, v8 3074b941ff4SPhilip Reames; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma 3084b941ff4SPhilip Reames; CHECK-NEXT: vsext.vf2 v8, v9 3094b941ff4SPhilip Reames; CHECK-NEXT: vwsub.wv v10, v10, v8 3104b941ff4SPhilip Reames; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma 3114b941ff4SPhilip Reames; CHECK-NEXT: vrsub.vi v8, v10, 0 3124b941ff4SPhilip Reames; CHECK-NEXT: vmax.vv v8, v10, v8 3134b941ff4SPhilip Reames; CHECK-NEXT: ret 3144b941ff4SPhilip Reames %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32> 3154b941ff4SPhilip Reames %b.zext = sext <vscale x 4 x i8> %b to <vscale x 4 x i32> 3164b941ff4SPhilip Reames %sub = sub <vscale x 4 x i32> %a.zext, %b.zext 3174b941ff4SPhilip Reames %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true) 3184b941ff4SPhilip Reames ret <vscale x 4 x i32> %abs 3194b941ff4SPhilip Reames} 3204b941ff4SPhilip Reames 3214b941ff4SPhilip Reamesdeclare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1) 3224b941ff4SPhilip Reames 3234b941ff4SPhilip Reamesdeclare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1) 3244b941ff4SPhilip Reamesdeclare <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16>, i1) 3254b941ff4SPhilip Reames 3264b941ff4SPhilip Reamesdeclare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1) 3274b941ff4SPhilip Reamesdeclare <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32>, i1) 3284b941ff4SPhilip Reames 3294b941ff4SPhilip Reamesdeclare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1) 3304b941ff4SPhilip Reamesdeclare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1) 
3314b941ff4SPhilip Reames 3324b941ff4SPhilip Reamesdeclare <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128>, i1) 3334b941ff4SPhilip Reames;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: 3344b941ff4SPhilip Reames; RV32: {{.*}} 3354b941ff4SPhilip Reames; RV64: {{.*}} 336