; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,RV64

; fold (add (umax X, C), -C) --> (usubsat X, C)

define <2 x i64> @add_umax_v2i64(<2 x i64> %a0) {
; CHECK-LABEL: add_umax_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vssubu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %v1 = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %a0, <2 x i64> <i64 7, i64 7>)
  %v2 = add <2 x i64> %v1, <i64 -7, i64 -7>
  ret <2 x i64> %v2
}

define <vscale x 2 x i64> @add_umax_nxv2i64(<vscale x 2 x i64> %a0) {
; CHECK-LABEL: add_umax_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vssubu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %v1 = call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2 x i64> splat (i64 7))
  %v2 = add <vscale x 2 x i64> %v1, splat (i64 -7)
  ret <vscale x 2 x i64> %v2
}

; Try to find umax(a,b) - b or a - umin(a,b) patterns;
; they may be converted to usubsat(a,b).
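; umax(a,b) - b is a-b when a >= b and 0 otherwise, and a - umin(a,b) gives
; the same result, so both match the clamp-at-zero semantics of usubsat(a,b).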

define <2 x i64> @sub_umax_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: sub_umax_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vssubu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v1 = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %a0, <2 x i64> %a1)
  %v2 = sub <2 x i64> %v1, %a1
  ret <2 x i64> %v2
}

define <vscale x 2 x i64> @sub_umax_nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2 x i64> %a1) {
; CHECK-LABEL: sub_umax_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vssubu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v1 = call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2 x i64> %a1)
  %v2 = sub <vscale x 2 x i64> %v1, %a1
  ret <vscale x 2 x i64> %v2
}

define <2 x i64> @sub_umin_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: sub_umin_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vssubu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v1 = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %a0, <2 x i64> %a1)
  %v2 = sub <2 x i64> %a0, %v1
  ret <2 x i64> %v2
}

define <vscale x 2 x i64> @sub_umin_nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2 x i64> %a1) {
; CHECK-LABEL: sub_umin_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vssubu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v1 = call <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2 x i64> %a1)
  %v2 = sub <vscale x 2 x i64> %a0, %v1
  ret <vscale x 2 x i64> %v2
}

; Match VSELECTs into sub with unsigned saturation.

; x >= y ? x-y : 0 --> usubsat x, y

define <2 x i64> @vselect_sub_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: vselect_sub_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vssubu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %cmp = icmp uge <2 x i64> %a0, %a1
  %v1 = sub <2 x i64> %a0, %a1
  %v2 = select <2 x i1> %cmp, <2 x i64> %v1, <2 x i64> zeroinitializer
  ret <2 x i64> %v2
}

define <vscale x 2 x i64> @vselect_sub_nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2 x i64> %a1) {
; CHECK-LABEL: vselect_sub_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vssubu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %cmp = icmp uge <vscale x 2 x i64> %a0, %a1
  %v1 = sub <vscale x 2 x i64> %a0, %a1
  %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %v1, <vscale x 2 x i64> zeroinitializer
  ret <vscale x 2 x i64> %v2
}

define <8 x i16> @vselect_sub_2_v8i16(<8 x i16> %x, i16 zeroext %w) nounwind {
; CHECK-LABEL: vselect_sub_2_v8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vssubu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = insertelement <8 x i16> poison, i16 %w, i32 0
  %broadcast15 = shufflevector <8 x i16> %0, <8 x i16> poison, <8 x i32> zeroinitializer
  %1 = icmp ult <8 x i16> %x, %broadcast15
  %2 = sub <8 x i16> %x, %broadcast15
  %res = select <8 x i1> %1, <8 x i16> zeroinitializer, <8 x i16> %2
  ret <8 x i16> %res
}

define <vscale x 8 x i16> @vselect_sub_2_nxv8i16(<vscale x 8 x i16> %x, i16 zeroext %w) nounwind {
; CHECK-LABEL: vselect_sub_2_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vssubu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = insertelement <vscale x 8 x i16> poison, i16 %w, i32 0
  %broadcast15 = shufflevector <vscale x 8 x i16> %0, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %1 = icmp ult <vscale x 8 x i16> %x, %broadcast15
  %2 = sub <vscale x 8 x i16> %x, %broadcast15
  %res = select <vscale x 8 x i1> %1, <vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %2
  ret <vscale x 8 x i16> %res
}

; x > y ? x-y : 0 --> usubsat x, y
; x > C-1 ? x+-C : 0 --> usubsat x, C
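; e.g. with C = 6, the compare x > 5 guards the x + (-6), so the select
; returns x - 6 exactly when the subtraction cannot wrap, and 0 otherwise.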

define <2 x i64> @vselect_add_const_v2i64(<2 x i64> %a0) {
; CHECK-LABEL: vselect_add_const_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 6
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vssubu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %v1 = add <2 x i64> %a0, <i64 -6, i64 -6>
  %cmp = icmp ugt <2 x i64> %a0, <i64 5, i64 5>
  %v2 = select <2 x i1> %cmp, <2 x i64> %v1, <2 x i64> zeroinitializer
  ret <2 x i64> %v2
}

define <vscale x 2 x i64> @vselect_add_const_nxv2i64(<vscale x 2 x i64> %a0) {
; CHECK-LABEL: vselect_add_const_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 6
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vssubu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %v1 = add <vscale x 2 x i64> %a0, splat (i64 -6)
  %cmp = icmp ugt <vscale x 2 x i64> %a0, splat (i64 5)
  %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %v1, <vscale x 2 x i64> zeroinitializer
  ret <vscale x 2 x i64> %v2
}

define <2 x i16> @vselect_add_const_signbit_v2i16(<2 x i16> %a0) {
; CHECK-LABEL: vselect_add_const_signbit_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 8
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vssubu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %cmp = icmp ugt <2 x i16> %a0, <i16 32766, i16 32766>
  %v1 = add <2 x i16> %a0, <i16 -32767, i16 -32767>
  %v2 = select <2 x i1> %cmp, <2 x i16> %v1, <2 x i16> zeroinitializer
  ret <2 x i16> %v2
}

define <vscale x 2 x i16> @vselect_add_const_signbit_nxv2i16(<vscale x 2 x i16> %a0) {
; CHECK-LABEL: vselect_add_const_signbit_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 8
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vssubu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 2 x i16> %a0, splat (i16 32766)
  %v1 = add <vscale x 2 x i16> %a0, splat (i16 -32767)
  %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %v1, <vscale x 2 x i16> zeroinitializer
  ret <vscale x 2 x i16> %v2
}

; x s< 0 ? x^C : 0 --> usubsat x, C

define <2 x i16> @vselect_xor_const_signbit_v2i16(<2 x i16> %a0) {
; CHECK-LABEL: vselect_xor_const_signbit_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 8
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vssubu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %cmp = icmp slt <2 x i16> %a0, zeroinitializer
  %v1 = xor <2 x i16> %a0, <i16 -32768, i16 -32768>
  %v2 = select <2 x i1> %cmp, <2 x i16> %v1, <2 x i16> zeroinitializer
  ret <2 x i16> %v2
}

define <vscale x 2 x i16> @vselect_xor_const_signbit_nxv2i16(<vscale x 2 x i16> %a0) {
; CHECK-LABEL: vselect_xor_const_signbit_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 8
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vssubu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %cmp = icmp slt <vscale x 2 x i16> %a0, zeroinitializer
  %v1 = xor <vscale x 2 x i16> %a0, splat (i16 -32768)
  %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %v1, <vscale x 2 x i16> zeroinitializer
  ret <vscale x 2 x i16> %v2
}

; Match VSELECTs into add with unsigned saturation.

; x <= x+y ? x+y : ~0 --> uaddsat x, y
; x+y >= x ? x+y : ~0 --> uaddsat x, y
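; Unsigned x+y wraps exactly when the result compares ult either operand, so
; selecting the sum when x <= x+y and all-ones otherwise is uaddsat(x, y).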

define <2 x i64> @vselect_add_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: vselect_add_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vsaddu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v1 = add <2 x i64> %a0, %a1
  %cmp = icmp ule <2 x i64> %a0, %v1
  %v2 = select <2 x i1> %cmp, <2 x i64> %v1, <2 x i64> <i64 -1, i64 -1>
  ret <2 x i64> %v2
}

define <vscale x 2 x i64> @vselect_add_nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2 x i64> %a1) {
; CHECK-LABEL: vselect_add_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsaddu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v1 = add <vscale x 2 x i64> %a0, %a1
  %cmp = icmp ule <vscale x 2 x i64> %a0, %v1
  %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %v1, <vscale x 2 x i64> splat (i64 -1)
  ret <vscale x 2 x i64> %v2
}

; If the RHS is a constant, we have to reverse the constant canonicalization.
; x >= ~C ? x+C : ~0 --> uaddsat x, C

define <2 x i64> @vselect_add_const_2_v2i64(<2 x i64> %a0) {
; CHECK-LABEL: vselect_add_const_2_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vsaddu.vi v8, v8, 6
; CHECK-NEXT:    ret
  %v1 = add <2 x i64> %a0, <i64 6, i64 6>
  %cmp = icmp ule <2 x i64> %a0, <i64 -7, i64 -7>
  %v2 = select <2 x i1> %cmp, <2 x i64> %v1, <2 x i64> <i64 -1, i64 -1>
  ret <2 x i64> %v2
}

define <vscale x 2 x i64> @vselect_add_const_2_nxv2i64(<vscale x 2 x i64> %a0) {
; CHECK-LABEL: vselect_add_const_2_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsaddu.vi v8, v8, 6
; CHECK-NEXT:    ret
  %v1 = add <vscale x 2 x i64> %a0, splat (i64 6)
  %cmp = icmp ule <vscale x 2 x i64> %a0, splat (i64 -7)
  %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %v1, <vscale x 2 x i64> splat (i64 -1)
  ret <vscale x 2 x i64> %v2
}

declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>)
declare <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}