// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN:   -target-feature +zvfbfmin \
// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
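
// These tests cover the __riscv_vsuxseg4ei16 intrinsic (indexed unordered
// 4-field segment store with 16-bit indices) for bfloat16 tuple types at
// LMUL mf4 through m2, in both unmasked and masked (_m) forms, checking that
// each call lowers to the corresponding llvm.riscv.vsuxseg4 tuple intrinsic.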

// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf4x4(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i16> [[VS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2,
                                   vbfloat16mf4x4_t vs3, size_t vl) {
  return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf2x4(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i16> [[VS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2,
                                   vbfloat16mf2x4_t vs3, size_t vl) {
  return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m1x4(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i16> [[VS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2,
                                  vbfloat16m1x4_t vs3, size_t vl) {
  return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m2x4(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i16> [[VS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2,
                                  vbfloat16m2x4_t vs3, size_t vl) {
  return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl);
}

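// Masked (_m) variants: the vbool mask (vm) becomes the first argument and
// each call lowers to the corresponding llvm.riscv.vsuxseg4.mask intrinsic.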
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf4x4_m(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1,
                                     vuint16mf4_t vs2, vbfloat16mf4x4_t vs3,
                                     size_t vl) {
  return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf2x4_m(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1,
                                     vuint16mf2_t vs2, vbfloat16mf2x4_t vs3,
                                     size_t vl) {
  return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m1x4_m(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
                                    vbfloat16m1x4_t vs3, size_t vl) {
  return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m2x4_m(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsuxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2,
                                    vbfloat16m2x4_t vs3, size_t vl) {
  return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl);
}