// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
// RUN:   -target-feature +zvbb \
// RUN:   -target-feature +zvbc \
// RUN:   -target-feature +zvkb \
// RUN:   -target-feature +zvkg \
// RUN:   -target-feature +zvkned \
// RUN:   -target-feature +zvknhb \
// RUN:   -target-feature +zvksed \
// RUN:   -target-feature +zvksh \
// RUN:   -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vbrev8_v_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vbrev8_v_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vbrev8_v_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vbrev8_v_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vbrev8_v_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vbrev8_v_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vbrev8_v_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vbrev8_v_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vbrev8_v_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vbrev8_v_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vbrev8_v_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vbrev8_v_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vbrev8_v_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vbrev8_v_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vbrev8_v_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vbrev8_v_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vbrev8_v_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vbrev8_v_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vbrev8_v_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vbrev8_v_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vbrev8_v_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vbrev8_v_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vbrev8_v_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vbrev8_v_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vbrev8_v_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vbrev8_v_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vbrev8_v_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vbrev8_v_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vbrev8_v_u8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vbrev8_v_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vbrev8_v_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vbrev8_v_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vbrev8_v_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vbrev8_v_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vbrev8_v_u16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vbrev8_v_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vbrev8_v_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vbrev8_v_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vbrev8_v_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vbrev8_v_u32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vbrev8_v_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vbrev8_v_u64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vbrev8_v_u64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vbrev8_v_u64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vbrev8_v_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vbrev8_v_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vbrev8_v_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vbrev8_v_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vbrev8_v_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vbrev8_v_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vbrev8_v_u8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vbrev8_v_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vbrev8_v_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vbrev8_v_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vbrev8_v_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vbrev8_v_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vbrev8_v_u16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vbrev8_v_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vbrev8_v_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vbrev8_v_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vbrev8_v_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vbrev8_v_u32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vbrev8_v_u64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vbrev8_v_u64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vbrev8_v_u64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vbrev8_v_u64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vbrev8_v_u8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vbrev8_v_u8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vbrev8_v_u8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vbrev8_v_u8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vbrev8_v_u8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vbrev8_v_u8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vbrev8_v_u8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vbrev8_v_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vbrev8_v_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vbrev8_v_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vbrev8_v_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vbrev8_v_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vbrev8_v_u16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vbrev8_v_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vbrev8_v_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vbrev8_v_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vbrev8_v_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vbrev8_v_u32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vbrev8_v_u64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vbrev8_v_u64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vbrev8_v_u64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vbrev8_v_u64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
}