; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s

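; Check that a vp.merge with an all-ones mask and matching EVL folds the
; vp.add into a tail-undisturbed vwadd.wv.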
define <vscale x 2 x i32> @vwadd_tu(<vscale x 2 x i8> %arg, <vscale x 2 x i32> %arg1, i32 signext %arg2) {
; CHECK-LABEL: vwadd_tu:
; CHECK:       # %bb.0: # %bb
; CHECK-NEXT:    slli a0, a0, 32
; CHECK-NEXT:    srli a0, a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, ma
; CHECK-NEXT:    vwadd.wv v9, v9, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
bb:
  %tmp = call <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i8(<vscale x 2 x i8> %arg, <vscale x 2 x i1> splat (i1 true), i32 %arg2)
  %tmp3 = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %arg1, <vscale x 2 x i32> %tmp, <vscale x 2 x i1> splat (i1 true), i32 %arg2)
  %tmp4 = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i32> %tmp3, <vscale x 2 x i32> %arg1, i32 %arg2)
  ret <vscale x 2 x i32> %tmp4
}

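; Same as above with vp.zext: expect a tail-undisturbed vwaddu.wv.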
define <vscale x 2 x i32> @vwaddu_tu(<vscale x 2 x i8> %arg, <vscale x 2 x i32> %arg1, i32 signext %arg2) {
; CHECK-LABEL: vwaddu_tu:
; CHECK:       # %bb.0: # %bb
; CHECK-NEXT:    slli a0, a0, 32
; CHECK-NEXT:    srli a0, a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, ma
; CHECK-NEXT:    vwaddu.wv v9, v9, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
bb:
  %tmp = call <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8> %arg, <vscale x 2 x i1> splat (i1 true), i32 %arg2)
  %tmp3 = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %arg1, <vscale x 2 x i32> %tmp, <vscale x 2 x i1> splat (i1 true), i32 %arg2)
  %tmp4 = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i32> %tmp3, <vscale x 2 x i32> %arg1, i32 %arg2)
  ret <vscale x 2 x i32> %tmp4
}

declare <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)

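; Check that two vp.sexts feeding a vp.add with the same mask and EVL
; combine into a masked vwadd.vv.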
define <vscale x 2 x i32> @vwadd_vv_vpnxv2i32_vpnxv2i16_vpnxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i32 signext %evl) {
; CHECK-LABEL: vwadd_vv_vpnxv2i32_vpnxv2i16_vpnxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 32
; CHECK-NEXT:    srli a0, a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vwadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %x.sext = call <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i1> %m, i32 %evl)
  %y.sext = call <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i16(<vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i32 %evl)
  %add = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x.sext, <vscale x 2 x i32> %y.sext, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %add
}

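; As above, but one operand is a plain (non-VP) sext. The combine should
; still produce a masked vwadd.vv.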
define <vscale x 2 x i32> @vwadd_vv_vpnxv2i32_vpnxv2i16_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i32 signext %evl) {
; CHECK-LABEL: vwadd_vv_vpnxv2i32_vpnxv2i16_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 32
; CHECK-NEXT:    srli a0, a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vwadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %x.sext = call <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i1> %m, i32 %evl)
  %y.sext = sext <vscale x 2 x i16> %y to <vscale x 2 x i32>
  %add = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x.sext, <vscale x 2 x i32> %y.sext, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %add
}

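; Both operands are plain sexts and only the add is a VP op; this still
; combines into a masked vwadd.vv.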
define <vscale x 2 x i32> @vwadd_vv_vpnxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i32 signext %evl) {
; CHECK-LABEL: vwadd_vv_vpnxv2i32_nxv2i16_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 32
; CHECK-NEXT:    srli a0, a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vwadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %x.sext = sext <vscale x 2 x i16> %x to <vscale x 2 x i32>
  %y.sext = sext <vscale x 2 x i16> %y to <vscale x 2 x i32>
  %add = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x.sext, <vscale x 2 x i32> %y.sext, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %add
}

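; Here the extends are VP ops but the add is a plain IR add, so the
; vwadd.vv is unmasked and uses VLMAX.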
define <vscale x 2 x i32> @vwadd_vv_nxv2i32_vpnxv2i16_vpnxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i32 signext %evl) {
; CHECK-LABEL: vwadd_vv_nxv2i32_vpnxv2i16_vpnxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vwadd.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %x.sext = call <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i1> %m, i32 %evl)
  %y.sext = call <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i16(<vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i32 %evl)
  %add = add <vscale x 2 x i32> %x.sext, %y.sext
  ret <vscale x 2 x i32> %add
}