// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN:   -target-feature +zvfbfmin \
// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vlseg7e16ff_v_bf16mf4x7_m(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 2 x i8>, 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 7), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 7), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP1]]
//
vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1,
                                                size_t *new_vl, size_t vl) {
  return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vlseg7e16ff_v_bf16mf2x7_m(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 4 x i8>, 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 7), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 7), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP1]]
//
vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1,
                                                size_t *new_vl, size_t vl) {
  return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vlseg7e16ff_v_bf16m1x7_m(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 7), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 7), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP1]]
//
vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
                                              size_t *new_vl, size_t vl) {
  return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl);
}