// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN:   -target-feature +zvfbfmin \
// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei16_v_bf16mf4x2_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
//
vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd,
                                                  const __bf16 *rs1,
                                                  vuint16mf4_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei16_v_bf16mf2x2_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
//
vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd,
                                                  const __bf16 *rs1,
                                                  vuint16mf2_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei16_v_bf16m1x2_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
//
vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd,
                                                const __bf16 *rs1,
                                                vuint16m1_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei16_v_bf16m2x2_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
//
vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd,
                                                const __bf16 *rs1,
                                                vuint16m2_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei16_v_bf16m4x2_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
//
vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd,
                                                const __bf16 *rs1,
                                                vuint16m4_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei16_v_bf16mf4x2_tum(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i16> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
//
vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm,
                                                   vbfloat16mf4x2_t vd,
                                                   const __bf16 *rs1,
                                                   vuint16mf4_t rs2,
                                                   size_t vl) {
  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei16_v_bf16mf2x2_tum(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i16> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
//
vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm,
                                                   vbfloat16mf2x2_t vd,
                                                   const __bf16 *rs1,
                                                   vuint16mf2_t rs2,
                                                   size_t vl) {
  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei16_v_bf16m1x2_tum(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i16> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
//
vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm,
                                                 vbfloat16m1x2_t vd,
                                                 const __bf16 *rs1,
                                                 vuint16m1_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei16_v_bf16m2x2_tum(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i16> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
//
vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm,
                                                 vbfloat16m2x2_t vd,
                                                 const __bf16 *rs1,
                                                 vuint16m2_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei16_v_bf16m4x2_tum(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i16> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
//
vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm,
                                                 vbfloat16m4x2_t vd,
                                                 const __bf16 *rs1,
                                                 vuint16m4_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei16_v_bf16mf4x2_tumu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i16> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
//
vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm,
                                                    vbfloat16mf4x2_t vd,
                                                    const __bf16 *rs1,
                                                    vuint16mf4_t rs2,
                                                    size_t vl) {
  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei16_v_bf16mf2x2_tumu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i16> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
//
vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm,
                                                    vbfloat16mf2x2_t vd,
                                                    const __bf16 *rs1,
                                                    vuint16mf2_t rs2,
                                                    size_t vl) {
  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei16_v_bf16m1x2_tumu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i16> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
//
vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm,
                                                  vbfloat16m1x2_t vd,
                                                  const __bf16 *rs1,
                                                  vuint16m1_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei16_v_bf16m2x2_tumu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i16> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
//
vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm,
                                                  vbfloat16m2x2_t vd,
                                                  const __bf16 *rs1,
                                                  vuint16m2_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei16_v_bf16m4x2_tumu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i16> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
//
vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm,
                                                  vbfloat16m4x2_t vd,
                                                  const __bf16 *rs1,
                                                  vuint16m4_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei16_v_bf16mf4x2_mu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i16> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
//
vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm,
                                                  vbfloat16mf4x2_t vd,
                                                  const __bf16 *rs1,
                                                  vuint16mf4_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei16_v_bf16mf2x2_mu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i16> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
//
vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm,
                                                  vbfloat16mf2x2_t vd,
                                                  const __bf16 *rs1,
                                                  vuint16mf2_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei16_v_bf16m1x2_mu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i16> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
//
vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm,
                                                vbfloat16m1x2_t vd,
                                                const __bf16 *rs1,
                                                vuint16m1_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei16_v_bf16m2x2_mu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i16> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
//
vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd,
                                                const __bf16 *rs1,
                                                vuint16m2_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei16_v_bf16m4x2_mu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i16> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
//
vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd,
                                                const __bf16 *rs1,
                                                vuint16m4_t rs2, size_t vl) {
  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
}