; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK
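
; Adding i1 values is addition modulo 2, i.e. XOR, and masked-off lanes of a
; VP intrinsic are allowed to be poison, so the mask operand can be ignored:
; each vp.add below should lower to a single unmasked vmxor.mm, with vsetvli
; programming VL from %evl at the SEW/LMUL matching the mask type.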
declare <vscale x 2 x i1> @llvm.vp.add.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i1> @vadd_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.add.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i1> %v
}

declare <vscale x 4 x i1> @llvm.vp.add.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i1> @vadd_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i1> @llvm.vp.add.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i1> %v
}

declare <vscale x 8 x i1> @llvm.vp.add.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i1> @vadd_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i1> @llvm.vp.add.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i1> %v
}

declare <vscale x 16 x i1> @llvm.vp.add.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i1> @vadd_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i1> @llvm.vp.add.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i1> %v
}

declare <vscale x 32 x i1> @llvm.vp.add.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i1> @vadd_vv_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i1> @llvm.vp.add.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i1> %v
}