; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK

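; The select's false operand is zero, so the sign-extending widening sub is
; expected to become a masked vwsub.wv rather than a separate vmerge (see the
; autogenerated CHECK lines below).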
define <vscale x 8 x i64> @vwsub_wv_mask_v8i32(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
; CHECK-LABEL: vwsub_wv_mask_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 42
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, mu
; CHECK-NEXT:    vwsub.wv v16, v16, v8, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
    %mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
    %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
    %sa = sext <vscale x 8 x i32> %a to <vscale x 8 x i64>
    %ret = sub <vscale x 8 x i64> %y, %sa
    ret <vscale x 8 x i64> %ret
}

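; Same as above, but with zero-extension: expected to use a masked vwsubu.wv.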
define <vscale x 8 x i64> @vwsubu_wv_mask_v8i32(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
; CHECK-LABEL: vwsubu_wv_mask_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 42
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, mu
; CHECK-NEXT:    vwsubu.wv v16, v16, v8, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
    %mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
    %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
    %sa = zext <vscale x 8 x i32> %a to <vscale x 8 x i64>
    %ret = sub <vscale x 8 x i64> %y, %sa
    ret <vscale x 8 x i64> %ret
}

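; Both operands are zero-extended from i32, so a vwsubu.vv is used; here the
; masked-off lanes are materialized with a vmerge and the vwsubu.vv itself is
; unmasked (per the CHECK lines).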
define <vscale x 8 x i64> @vwsubu_vv_mask_v8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
; CHECK-LABEL: vwsubu_vv_mask_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 42
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    vwsubu.vv v16, v12, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
    %mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
    %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
    %sa = zext <vscale x 8 x i32> %a to <vscale x 8 x i64>
    %sy = zext <vscale x 8 x i32> %y to <vscale x 8 x i64>
    %ret = sub <vscale x 8 x i64> %sy, %sa
    ret <vscale x 8 x i64> %ret
}

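; The select's false operand is splat(1) rather than zero, so the masked fold
; does not apply; the CHECK lines show a vmerge feeding an unmasked vwsub.wv.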
define <vscale x 8 x i64> @vwsub_wv_mask_v8i32_nonzero(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
; CHECK-LABEL: vwsub_wv_mask_v8i32_nonzero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 42
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    vmv.v.i v12, 1
; CHECK-NEXT:    vmerge.vvm v24, v12, v8, v0
; CHECK-NEXT:    vwsub.wv v8, v16, v24
; CHECK-NEXT:    ret
    %mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
    %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> splat (i32 1)
    %sa = sext <vscale x 8 x i32> %a to <vscale x 8 x i64>
    %ret = sub <vscale x 8 x i64> %y, %sa
    ret <vscale x 8 x i64> %ret
}