; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK

; (select mask, x, 0) sign-extended and added to a wide vector folds to a
; masked vwadd.wv (merge/passthru is %y, so masked-off lanes keep %y).
define <8 x i64> @vwadd_wv_mask_v8i32(<8 x i32> %x, <8 x i64> %y) {
; CHECK-LABEL: vwadd_wv_mask_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 42
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT:    vwadd.wv v12, v12, v8, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %mask = icmp slt <8 x i32> %x, <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>
  %a = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
  %sa = sext <8 x i32> %a to <8 x i64>
  %ret = add <8 x i64> %sa, %y
  ret <8 x i64> %ret
}

; Same fold as above but with zero-extension, selecting vwaddu.wv.
define <8 x i64> @vwaddu_wv_mask_v8i32(<8 x i32> %x, <8 x i64> %y) {
; CHECK-LABEL: vwaddu_wv_mask_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 42
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT:    vwaddu.wv v12, v12, v8, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %mask = icmp slt <8 x i32> %x, <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>
  %a = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
  %sa = zext <8 x i32> %a to <8 x i64>
  %ret = add <8 x i64> %sa, %y
  ret <8 x i64> %ret
}

; Both addends are narrow zexts here; the select is materialized with
; vmerge and the add becomes an unmasked vwaddu.vv.
define <8 x i64> @vwaddu_vv_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
; CHECK-LABEL: vwaddu_vv_mask_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 42
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    vmv.v.i v12, 0
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %mask = icmp slt <8 x i32> %x, <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>
  %a = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
  %sa = zext <8 x i32> %a to <8 x i64>
  %sy = zext <8 x i32> %y to <8 x i64>
  %ret = add <8 x i64> %sa, %sy
  ret <8 x i64> %ret
}

; As vwadd_wv_mask_v8i32 but with the add operands commuted
; (%y + %sa); the masked vwadd.wv fold still applies.
define <8 x i64> @vwadd_wv_mask_v8i32_commutative(<8 x i32> %x, <8 x i64> %y) {
; CHECK-LABEL: vwadd_wv_mask_v8i32_commutative:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 42
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT:    vwadd.wv v12, v12, v8, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %mask = icmp slt <8 x i32> %x, <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>
  %a = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
  %sa = sext <8 x i32> %a to <8 x i64>
  %ret = add <8 x i64> %y, %sa
  ret <8 x i64> %ret
}

; Negative test: the select's false operand is 1, not 0, so the masked
; fold does not apply — a vmerge plus an unmasked vwadd.wv is emitted.
define <8 x i64> @vwadd_wv_mask_v8i32_nonzero(<8 x i32> %x, <8 x i64> %y) {
; CHECK-LABEL: vwadd_wv_mask_v8i32_nonzero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 42
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    vmv.v.i v10, 1
; CHECK-NEXT:    vmerge.vvm v16, v10, v8, v0
; CHECK-NEXT:    vwadd.wv v8, v12, v16
; CHECK-NEXT:    ret
  %mask = icmp slt <8 x i32> %x, <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>
  %a = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %sa = sext <8 x i32> %a to <8 x i64>
  %ret = add <8 x i64> %y, %sa
  ret <8 x i64> %ret
}