1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 2 // REQUIRES: riscv-registered-target 3 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ 4 // RUN: -target-feature +zvfhmin -disable-O0-optnone \ 5 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ 6 // RUN: FileCheck --check-prefix=CHECK-RV64 %s 7 8 #include <riscv_vector.h> 9 10 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_f16mf4x8_tu 11 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { 12 // CHECK-RV64-NEXT: entry: 13 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]], i64 4) 14 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 15 // 16 vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { 17 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 18 } 19 20 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_f16mf2x8_tu 21 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 22 // CHECK-RV64-NEXT: entry: 23 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]], i64 4) 24 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 25 // 26 vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { 27 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 28 } 29 30 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_f16m1x8_tu 31 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 32 // CHECK-RV64-NEXT: entry: 33 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]], i64 4) 34 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 35 // 36 vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { 37 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 38 } 39 40 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_f32mf2x8_tu 41 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x 
i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 42 // CHECK-RV64-NEXT: entry: 43 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]], i64 5) 44 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 45 // 46 vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { 47 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 48 } 49 50 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_f32m1x8_tu 51 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 52 // CHECK-RV64-NEXT: entry: 53 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]], i64 5) 54 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 55 // 56 vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { 57 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 58 } 59 60 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_f64m1x8_tu 61 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 62 // CHECK-RV64-NEXT: entry: 63 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]], i64 6) 64 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 65 // 66 vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { 67 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 68 } 69 70 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @test_vluxseg8ei32_v_i8mf8x8_tu 71 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 72 // CHECK-RV64-NEXT: entry: 73 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]], i64 3) 74 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[TMP0]] 75 // 76 vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { 77 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 78 } 79 80 // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_i8mf4x8_tu 81 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 82 // CHECK-RV64-NEXT: entry: 83 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]], i64 3) 84 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 85 // 86 vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { 87 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 88 } 89 90 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_i8mf2x8_tu 91 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 92 // CHECK-RV64-NEXT: entry: 93 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]], i64 3) 94 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 95 // 96 vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { 97 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 98 } 99 100 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i8m1x8_tu 101 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 102 // CHECK-RV64-NEXT: entry: 103 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]], i64 3) 104 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 105 // 106 vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { 107 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 108 } 109 110 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_i16mf4x8_tu 111 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 112 // CHECK-RV64-NEXT: entry: 113 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]], i64 4) 114 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 
2 x i8>, 8) [[TMP0]] 115 // 116 vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { 117 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 118 } 119 120 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_i16mf2x8_tu 121 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 122 // CHECK-RV64-NEXT: entry: 123 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]], i64 4) 124 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 125 // 126 vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { 127 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 128 } 129 130 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i16m1x8_tu 131 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 132 // CHECK-RV64-NEXT: entry: 133 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]], i64 4) 134 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 135 // 136 vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { 137 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 138 } 139 140 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_i32mf2x8_tu 141 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 142 // CHECK-RV64-NEXT: entry: 143 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]], i64 5) 144 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 145 // 146 vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { 147 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 148 } 149 150 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i32m1x8_tu 151 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 152 // CHECK-RV64-NEXT: entry: 153 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
<vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]], i64 5) 154 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 155 // 156 vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { 157 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 158 } 159 160 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i64m1x8_tu 161 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 162 // CHECK-RV64-NEXT: entry: 163 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]], i64 6) 164 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 165 // 166 vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { 167 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 168 } 169 170 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @test_vluxseg8ei32_v_u8mf8x8_tu 171 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 172 // CHECK-RV64-NEXT: entry: 173 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]], i64 3) 174 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[TMP0]] 175 // 176 vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { 177 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 178 } 179 180 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_u8mf4x8_tu 181 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 182 // CHECK-RV64-NEXT: entry: 183 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]], i64 3) 184 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 185 // 186 vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { 187 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 188 } 189 190 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_u8mf2x8_tu 191 // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 192 // CHECK-RV64-NEXT: entry: 193 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]], i64 3) 194 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 195 // 196 vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { 197 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 198 } 199 200 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u8m1x8_tu 201 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 202 // CHECK-RV64-NEXT: entry: 203 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]], i64 3) 204 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 205 // 206 vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { 207 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 208 } 209 210 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_u16mf4x8_tu 211 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 212 // CHECK-RV64-NEXT: entry: 213 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]], i64 4) 214 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 215 // 216 vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { 217 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 218 } 219 220 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_u16mf2x8_tu 221 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 222 // CHECK-RV64-NEXT: entry: 223 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]], i64 4) 224 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 225 // 226 vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t 
maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { 227 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 228 } 229 230 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u16m1x8_tu 231 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 232 // CHECK-RV64-NEXT: entry: 233 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]], i64 4) 234 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 235 // 236 vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { 237 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 238 } 239 240 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_u32mf2x8_tu 241 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 242 // CHECK-RV64-NEXT: entry: 243 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]], i64 5) 244 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 245 // 246 vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { 247 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 248 } 249 250 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u32m1x8_tu 251 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 252 // CHECK-RV64-NEXT: entry: 253 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]], i64 5) 254 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 255 // 256 vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { 257 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 258 } 259 260 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u64m1x8_tu 261 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 262 // CHECK-RV64-NEXT: entry: 263 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]], i64 6) 264 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 265 // 266 vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { 267 return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 268 } 269 270 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_f16mf4x8_tum 271 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 272 // CHECK-RV64-NEXT: entry: 273 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 4) 274 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 275 // 276 vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { 277 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 278 } 279 280 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_f16mf2x8_tum 281 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 282 // CHECK-RV64-NEXT: entry: 283 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2, i64 4) 284 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 285 // 286 vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { 287 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 288 } 289 290 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_f16m1x8_tum 291 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 292 // CHECK-RV64-NEXT: entry: 293 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2, i64 4) 294 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 295 // 296 vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t mask, 
vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { 297 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 298 } 299 300 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_f32mf2x8_tum 301 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 302 // CHECK-RV64-NEXT: entry: 303 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 5) 304 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 305 // 306 vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { 307 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 308 } 309 310 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_f32m1x8_tum 311 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 312 // CHECK-RV64-NEXT: entry: 313 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2, i64 5) 314 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 315 // 316 vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { 317 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 318 } 319 320 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_f64m1x8_tum 321 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 322 // CHECK-RV64-NEXT: entry: 323 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 6) 324 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 325 // 326 vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { 327 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 328 } 329 330 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @test_vluxseg8ei32_v_i8mf8x8_tum 331 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], 
target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 332 // CHECK-RV64-NEXT: entry: 333 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 3) 334 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[TMP0]] 335 // 336 vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { 337 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 338 } 339 340 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_i8mf4x8_tum 341 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 342 // CHECK-RV64-NEXT: entry: 343 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2, i64 3) 344 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 345 // 346 vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { 347 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 348 } 349 350 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_i8mf2x8_tum 351 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 352 // CHECK-RV64-NEXT: entry: 353 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2, i64 3) 354 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 355 // 356 vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { 357 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 358 } 359 360 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i8m1x8_tum 361 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 362 // CHECK-RV64-NEXT: entry: 363 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2, i64 3) 364 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 365 // 366 vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { 367 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 368 } 369 370 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_i16mf4x8_tum 371 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 372 // CHECK-RV64-NEXT: entry: 373 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 4) 374 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 375 // 376 vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { 377 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 378 } 379 380 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_i16mf2x8_tum 381 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 382 // CHECK-RV64-NEXT: entry: 383 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2, i64 4) 384 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 385 // 386 vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { 387 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 388 } 389 390 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i16m1x8_tum 391 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 392 // CHECK-RV64-NEXT: entry: 393 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2, i64 4) 394 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 395 // 396 vint16m1x8_t 
test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { 397 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 398 } 399 400 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_i32mf2x8_tum 401 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 402 // CHECK-RV64-NEXT: entry: 403 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 5) 404 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 405 // 406 vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { 407 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 408 } 409 410 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i32m1x8_tum 411 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 412 // CHECK-RV64-NEXT: entry: 413 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2, i64 5) 414 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 415 // 416 vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { 417 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 418 } 419 420 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i64m1x8_tum 421 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 422 // CHECK-RV64-NEXT: entry: 423 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 6) 424 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 425 // 426 vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { 427 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 428 } 429 430 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @test_vluxseg8ei32_v_u8mf8x8_tum 431 // 
CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 432 // CHECK-RV64-NEXT: entry: 433 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 3) 434 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[TMP0]] 435 // 436 vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { 437 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 438 } 439 440 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_u8mf4x8_tum 441 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 442 // CHECK-RV64-NEXT: entry: 443 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2, i64 3) 444 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 445 // 446 vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { 447 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 448 } 449 450 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_u8mf2x8_tum 451 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 452 // CHECK-RV64-NEXT: entry: 453 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2, i64 3) 454 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 455 // 456 vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { 457 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 458 } 459 460 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u8m1x8_tum 461 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 462 // CHECK-RV64-NEXT: entry: 463 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2, i64 3) 464 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 465 // 466 vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { 467 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 468 } 469 470 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_u16mf4x8_tum 471 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 472 // CHECK-RV64-NEXT: entry: 473 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 4) 474 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 475 // 476 vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { 477 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 478 } 479 480 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_u16mf2x8_tum 481 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 482 // CHECK-RV64-NEXT: entry: 483 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2, i64 4) 484 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 485 // 486 vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { 487 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 488 } 489 490 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u16m1x8_tum 491 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 492 // CHECK-RV64-NEXT: entry: 493 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2, i64 4) 494 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 495 // 496 vuint16m1x8_t 
test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { 497 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 498 } 499 500 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_u32mf2x8_tum 501 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 502 // CHECK-RV64-NEXT: entry: 503 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 5) 504 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 505 // 506 vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { 507 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 508 } 509 510 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u32m1x8_tum 511 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 512 // CHECK-RV64-NEXT: entry: 513 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2, i64 5) 514 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 515 // 516 vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { 517 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 518 } 519 520 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u64m1x8_tum 521 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 522 // CHECK-RV64-NEXT: entry: 523 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 6) 524 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 525 // 526 vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { 527 return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 528 } 529 530 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_f16mf4x8_tumu 531 // 
CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 532 // CHECK-RV64-NEXT: entry: 533 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 4) 534 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 535 // 536 vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { 537 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 538 } 539 540 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_f16mf2x8_tumu 541 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 542 // CHECK-RV64-NEXT: entry: 543 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0, i64 4) 544 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 545 // 546 vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { 547 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 548 } 549 550 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_f16m1x8_tumu 551 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 552 // CHECK-RV64-NEXT: entry: 553 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0, i64 4) 554 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 555 // 556 vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { 557 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 558 } 559 560 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_f32mf2x8_tumu 561 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 562 // CHECK-RV64-NEXT: entry: 563 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 
4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 5) 564 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 565 // 566 vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { 567 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 568 } 569 570 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_f32m1x8_tumu 571 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 572 // CHECK-RV64-NEXT: entry: 573 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0, i64 5) 574 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 575 // 576 vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { 577 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 578 } 579 580 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_f64m1x8_tumu 581 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 582 // CHECK-RV64-NEXT: entry: 583 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 6) 584 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 585 // 586 vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { 587 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 588 } 589 590 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @test_vluxseg8ei32_v_i8mf8x8_tumu 591 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 592 // CHECK-RV64-NEXT: entry: 593 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 3) 594 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[TMP0]] 595 
// 596 vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { 597 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 598 } 599 600 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_i8mf4x8_tumu 601 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 602 // CHECK-RV64-NEXT: entry: 603 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0, i64 3) 604 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 605 // 606 vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { 607 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 608 } 609 610 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_i8mf2x8_tumu 611 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 612 // CHECK-RV64-NEXT: entry: 613 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0, i64 3) 614 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 615 // 616 vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { 617 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 618 } 619 620 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i8m1x8_tumu 621 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 622 // CHECK-RV64-NEXT: entry: 623 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0, i64 3) 624 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 625 // 626 vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { 627 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 628 } 629 630 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_i16mf4x8_tumu 
631 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 632 // CHECK-RV64-NEXT: entry: 633 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 4) 634 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 635 // 636 vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { 637 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 638 } 639 640 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_i16mf2x8_tumu 641 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 642 // CHECK-RV64-NEXT: entry: 643 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0, i64 4) 644 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 645 // 646 vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { 647 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 648 } 649 650 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i16m1x8_tumu 651 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 652 // CHECK-RV64-NEXT: entry: 653 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0, i64 4) 654 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 655 // 656 vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { 657 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 658 } 659 660 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_i32mf2x8_tumu 661 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 662 // CHECK-RV64-NEXT: entry: 663 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 
8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 5) 664 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 665 // 666 vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { 667 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 668 } 669 670 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i32m1x8_tumu 671 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 672 // CHECK-RV64-NEXT: entry: 673 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0, i64 5) 674 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 675 // 676 vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { 677 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 678 } 679 680 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i64m1x8_tumu 681 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 682 // CHECK-RV64-NEXT: entry: 683 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 6) 684 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 685 // 686 vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { 687 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 688 } 689 690 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @test_vluxseg8ei32_v_u8mf8x8_tumu 691 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 692 // CHECK-RV64-NEXT: entry: 693 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 3) 694 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[TMP0]] 695 // 696 
vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { 697 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 698 } 699 700 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_u8mf4x8_tumu 701 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 702 // CHECK-RV64-NEXT: entry: 703 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0, i64 3) 704 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 705 // 706 vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { 707 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 708 } 709 710 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_u8mf2x8_tumu 711 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 712 // CHECK-RV64-NEXT: entry: 713 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0, i64 3) 714 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 715 // 716 vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { 717 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 718 } 719 720 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u8m1x8_tumu 721 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 722 // CHECK-RV64-NEXT: entry: 723 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0, i64 3) 724 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 725 // 726 vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { 727 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 728 } 729 730 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) 
@test_vluxseg8ei32_v_u16mf4x8_tumu 731 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 732 // CHECK-RV64-NEXT: entry: 733 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 4) 734 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 735 // 736 vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { 737 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 738 } 739 740 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_u16mf2x8_tumu 741 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 742 // CHECK-RV64-NEXT: entry: 743 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0, i64 4) 744 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 745 // 746 vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { 747 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 748 } 749 750 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u16m1x8_tumu 751 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 752 // CHECK-RV64-NEXT: entry: 753 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0, i64 4) 754 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 755 // 756 vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { 757 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 758 } 759 760 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_u32mf2x8_tumu 761 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 762 // CHECK-RV64-NEXT: entry: 763 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 5) 764 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 765 // 766 vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { 767 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 768 } 769 770 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u32m1x8_tumu 771 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 772 // CHECK-RV64-NEXT: entry: 773 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0, i64 5) 774 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 775 // 776 vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { 777 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 778 } 779 780 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u64m1x8_tumu 781 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 782 // CHECK-RV64-NEXT: entry: 783 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 6) 784 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 785 // 786 vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { 787 return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); 788 } 789 790 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_f16mf4x8_mu 791 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 792 // CHECK-RV64-NEXT: entry: 793 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 4) 794 // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 795 // 796 vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { 797 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 798 } 799 800 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_f16mf2x8_mu 801 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 802 // CHECK-RV64-NEXT: entry: 803 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1, i64 4) 804 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 805 // 806 vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { 807 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 808 } 809 810 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_f16m1x8_mu 811 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 812 // CHECK-RV64-NEXT: entry: 813 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1, i64 4) 814 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 815 // 816 vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { 817 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 818 } 819 820 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_f32mf2x8_mu 821 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 822 // CHECK-RV64-NEXT: entry: 823 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 5) 824 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 825 // 826 vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { 827 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 828 } 829 830 // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_f32m1x8_mu 831 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 832 // CHECK-RV64-NEXT: entry: 833 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1, i64 5) 834 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 835 // 836 vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { 837 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 838 } 839 840 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_f64m1x8_mu 841 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 842 // CHECK-RV64-NEXT: entry: 843 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 6) 844 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 845 // 846 vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { 847 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 848 } 849 850 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @test_vluxseg8ei32_v_i8mf8x8_mu 851 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 852 // CHECK-RV64-NEXT: entry: 853 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 3) 854 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[TMP0]] 855 // 856 vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { 857 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 858 } 859 860 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_i8mf4x8_mu 861 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 862 // CHECK-RV64-NEXT: entry: 863 // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1, i64 3) 864 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 865 // 866 vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { 867 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 868 } 869 870 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_i8mf2x8_mu 871 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 872 // CHECK-RV64-NEXT: entry: 873 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1, i64 3) 874 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 875 // 876 vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { 877 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 878 } 879 880 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i8m1x8_mu 881 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 882 // CHECK-RV64-NEXT: entry: 883 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1, i64 3) 884 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 885 // 886 vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { 887 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 888 } 889 890 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_i16mf4x8_mu 891 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 892 // CHECK-RV64-NEXT: entry: 893 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 4) 894 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x 
i8>, 8) [[TMP0]] 895 // 896 vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { 897 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 898 } 899 900 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_i16mf2x8_mu 901 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 902 // CHECK-RV64-NEXT: entry: 903 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1, i64 4) 904 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 905 // 906 vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { 907 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 908 } 909 910 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i16m1x8_mu 911 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 912 // CHECK-RV64-NEXT: entry: 913 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1, i64 4) 914 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 915 // 916 vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { 917 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 918 } 919 920 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_i32mf2x8_mu 921 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 922 // CHECK-RV64-NEXT: entry: 923 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 5) 924 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 925 // 926 vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { 927 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 928 } 929 930 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) 
@test_vluxseg8ei32_v_i32m1x8_mu 931 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 932 // CHECK-RV64-NEXT: entry: 933 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1, i64 5) 934 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 935 // 936 vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { 937 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 938 } 939 940 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_i64m1x8_mu 941 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 942 // CHECK-RV64-NEXT: entry: 943 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 6) 944 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 945 // 946 vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { 947 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 948 } 949 950 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @test_vluxseg8ei32_v_u8mf8x8_mu 951 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 952 // CHECK-RV64-NEXT: entry: 953 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 3) 954 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 1 x i8>, 8) [[TMP0]] 955 // 956 vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { 957 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 958 } 959 960 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_u8mf4x8_mu 961 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 962 // CHECK-RV64-NEXT: entry: 963 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 
2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1, i64 3) 964 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 965 // 966 vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { 967 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 968 } 969 970 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_u8mf2x8_mu 971 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 972 // CHECK-RV64-NEXT: entry: 973 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1, i64 3) 974 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 975 // 976 vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { 977 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 978 } 979 980 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u8m1x8_mu 981 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 982 // CHECK-RV64-NEXT: entry: 983 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1, i64 3) 984 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 985 // 986 vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { 987 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 988 } 989 990 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_u16mf4x8_mu 991 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 992 // CHECK-RV64-NEXT: entry: 993 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 4) 994 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]] 995 // 996 vuint16mf4x8_t 
test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { 997 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 998 } 999 1000 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_u16mf2x8_mu 1001 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1002 // CHECK-RV64-NEXT: entry: 1003 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1, i64 4) 1004 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 1005 // 1006 vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { 1007 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 1008 } 1009 1010 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u16m1x8_mu 1011 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1012 // CHECK-RV64-NEXT: entry: 1013 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1, i64 4) 1014 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 1015 // 1016 vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { 1017 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 1018 } 1019 1020 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_u32mf2x8_mu 1021 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1022 // CHECK-RV64-NEXT: entry: 1023 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 5) 1024 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]] 1025 // 1026 vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { 1027 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 1028 } 1029 1030 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) 
@test_vluxseg8ei32_v_u32m1x8_mu 1031 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1032 // CHECK-RV64-NEXT: entry: 1033 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1, i64 5) 1034 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 1035 // 1036 vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { 1037 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 1038 } 1039 1040 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_u64m1x8_mu 1041 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1042 // CHECK-RV64-NEXT: entry: 1043 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 6) 1044 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]] 1045 // 1046 vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { 1047 return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 1048 } 1049 1050
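
// ---------------------------------------------------------------------------
// Illustrative usage sketch only; it is NOT part of the autogenerated CHECK
// assertions above and carries no FileCheck lines. It shows how a caller might
// combine the mask-undisturbed (_mu) overload exercised in this file with a
// tuple-element accessor. The __riscv_vget_v_f32m1x8_f32m1 helper is assumed
// to be provided by the same RVV C intrinsics specification; every other type
// and call below mirrors the signatures already used in the tests above.
vfloat32m1_t demo_vluxseg8ei32_v_f32m1x8_mu(vbool32_t mask,
                                            vfloat32m1x8_t maskedoff_tuple,
                                            const float *base,
                                            vuint32m1_t bindex,
                                            size_t vl) {
  // _mu policy: elements whose mask bit is clear keep the corresponding values
  // from maskedoff_tuple rather than being overwritten by the indexed load.
  vfloat32m1x8_t loaded =
      __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
  // Extract segment field 0 of the 8-field tuple (assumed accessor; the index
  // must be a compile-time constant in the range 0-7).
  return __riscv_vget_v_f32m1x8_f32m1(loaded, 0);
}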