1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 2 // REQUIRES: riscv-registered-target 3 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ 4 // RUN: -target-feature +zvbb \ 5 // RUN: -target-feature +zvbc \ 6 // RUN: -target-feature +zvkb \ 7 // RUN: -target-feature +zvkg \ 8 // RUN: -target-feature +zvkned \ 9 // RUN: -target-feature +zvknhb \ 10 // RUN: -target-feature +zvksed \ 11 // RUN: -target-feature +zvksh \ 12 // RUN: -disable-O0-optnone \ 13 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ 14 // RUN: FileCheck --check-prefix=CHECK-RV64 %s 15 16 #include <riscv_vector.h> 17 18 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vror_vv_u8mf8_tu 19 // CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { 20 // CHECK-RV64-NEXT: entry: 21 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]]) 22 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] 23 // 24 vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { 25 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 26 } 27 28 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vror_vx_u8mf8_tu 29 // CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 30 // CHECK-RV64-NEXT: entry: 31 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], i64 [[RS1]], i64 [[VL]]) 32 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] 33 // 34 vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { 35 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 36 } 37 38 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vror_vv_u8mf4_tu 39 // CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 40 // CHECK-RV64-NEXT: entry: 41 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]]) 42 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] 43 // 44 vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { 45 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 46 } 47 48 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vror_vx_u8mf4_tu 49 // CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 50 // CHECK-RV64-NEXT: entry: 51 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], i64 [[RS1]], i64 [[VL]]) 52 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] 53 // 54 vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { 55 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 56 } 57 58 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vror_vv_u8mf2_tu 59 // CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 60 // CHECK-RV64-NEXT: entry: 61 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]]) 62 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] 63 // 64 vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { 65 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 66 } 67 68 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vror_vx_u8mf2_tu 69 // CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 70 // CHECK-RV64-NEXT: entry: 71 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], i64 [[RS1]], i64 [[VL]]) 72 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] 73 // 74 vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { 75 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 76 } 77 78 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vror_vv_u8m1_tu 79 // CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 80 // CHECK-RV64-NEXT: entry: 81 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]]) 82 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]] 83 // 84 vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { 85 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 86 } 87 88 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vror_vx_u8m1_tu 89 // CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 90 // CHECK-RV64-NEXT: entry: 91 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], i64 [[RS1]], i64 [[VL]]) 92 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]] 93 // 94 vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { 95 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 96 } 97 98 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vror_vv_u8m2_tu 99 // CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 100 // CHECK-RV64-NEXT: entry: 101 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]]) 102 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]] 103 // 104 vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { 105 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 106 } 107 108 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vror_vx_u8m2_tu 109 // CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 110 // CHECK-RV64-NEXT: entry: 111 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], i64 [[RS1]], i64 [[VL]]) 112 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]] 113 // 114 vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { 115 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 116 } 117 118 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vror_vv_u8m4_tu 119 // CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 120 // CHECK-RV64-NEXT: entry: 121 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]]) 122 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]] 123 // 124 vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { 125 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 126 } 127 128 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vror_vx_u8m4_tu 129 // CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 130 // CHECK-RV64-NEXT: entry: 131 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], i64 [[RS1]], i64 [[VL]]) 132 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]] 133 // 134 vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { 135 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 136 } 137 138 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vror_vv_u8m8_tu 139 // CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 140 // CHECK-RV64-NEXT: entry: 141 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]]) 142 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]] 143 // 144 vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { 145 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 146 } 147 148 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vror_vx_u8m8_tu 149 // CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 150 // CHECK-RV64-NEXT: entry: 151 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], i64 [[RS1]], i64 [[VL]]) 152 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]] 153 // 154 vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { 155 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 156 } 157 158 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vror_vv_u16mf4_tu 159 // CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 160 // CHECK-RV64-NEXT: entry: 161 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]]) 162 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] 163 // 164 vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { 165 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 166 } 167 168 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vror_vx_u16mf4_tu 169 // CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 170 // CHECK-RV64-NEXT: entry: 171 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], i64 [[RS1]], i64 [[VL]]) 172 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] 173 // 174 vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { 175 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 176 } 177 178 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vror_vv_u16mf2_tu 179 // CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 180 // CHECK-RV64-NEXT: entry: 181 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]]) 182 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] 183 // 184 vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { 185 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 186 } 187 188 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vror_vx_u16mf2_tu 189 // CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 190 // CHECK-RV64-NEXT: entry: 191 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], i64 [[RS1]], i64 [[VL]]) 192 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] 193 // 194 vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { 195 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 196 } 197 198 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vror_vv_u16m1_tu 199 // CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 200 // CHECK-RV64-NEXT: entry: 201 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]]) 202 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] 203 // 204 vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { 205 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 206 } 207 208 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vror_vx_u16m1_tu 209 // CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 210 // CHECK-RV64-NEXT: entry: 211 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], i64 [[RS1]], i64 [[VL]]) 212 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] 213 // 214 vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { 215 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 216 } 217 218 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vror_vv_u16m2_tu 219 // CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 220 // CHECK-RV64-NEXT: entry: 221 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]]) 222 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] 223 // 224 vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { 225 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 226 } 227 228 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vror_vx_u16m2_tu 229 // CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 230 // CHECK-RV64-NEXT: entry: 231 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], i64 [[RS1]], i64 [[VL]]) 232 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] 233 // 234 vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { 235 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 236 } 237 238 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vror_vv_u16m4_tu 239 // CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 240 // CHECK-RV64-NEXT: entry: 241 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]]) 242 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] 243 // 244 vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { 245 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 246 } 247 248 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vror_vx_u16m4_tu 249 // CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 250 // CHECK-RV64-NEXT: entry: 251 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], i64 [[RS1]], i64 [[VL]]) 252 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] 253 // 254 vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { 255 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 256 } 257 258 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vror_vv_u16m8_tu 259 // CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 260 // CHECK-RV64-NEXT: entry: 261 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]]) 262 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] 263 // 264 vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { 265 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 266 } 267 268 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vror_vx_u16m8_tu 269 // CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 270 // CHECK-RV64-NEXT: entry: 271 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], i64 [[RS1]], i64 [[VL]]) 272 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] 273 // 274 vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { 275 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 276 } 277 278 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vror_vv_u32mf2_tu 279 // CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 280 // CHECK-RV64-NEXT: entry: 281 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]]) 282 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] 283 // 284 vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { 285 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 286 } 287 288 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vror_vx_u32mf2_tu 289 // CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 290 // CHECK-RV64-NEXT: entry: 291 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], i64 [[RS1]], i64 [[VL]]) 292 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] 293 // 294 vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { 295 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 296 } 297 298 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vror_vv_u32m1_tu 299 // CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 300 // CHECK-RV64-NEXT: entry: 301 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]]) 302 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] 303 // 304 vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { 305 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 306 } 307 308 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vror_vx_u32m1_tu 309 // CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 310 // CHECK-RV64-NEXT: entry: 311 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], i64 [[RS1]], i64 [[VL]]) 312 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] 313 // 314 vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { 315 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 316 } 317 318 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vror_vv_u32m2_tu 319 // CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 320 // CHECK-RV64-NEXT: entry: 321 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]]) 322 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] 323 // 324 vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { 325 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 326 } 327 328 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vror_vx_u32m2_tu 329 // CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 330 // CHECK-RV64-NEXT: entry: 331 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], i64 [[RS1]], i64 [[VL]]) 332 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] 333 // 334 vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { 335 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 336 } 337 338 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vror_vv_u32m4_tu 339 // CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 340 // CHECK-RV64-NEXT: entry: 341 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]]) 342 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] 343 // 344 vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { 345 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 346 } 347 348 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vror_vx_u32m4_tu 349 // CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 350 // CHECK-RV64-NEXT: entry: 351 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], i64 [[RS1]], i64 [[VL]]) 352 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] 353 // 354 vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { 355 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 356 } 357 358 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vror_vv_u32m8_tu 359 // CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 360 // CHECK-RV64-NEXT: entry: 361 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]]) 362 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] 363 // 364 vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { 365 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 366 } 367 368 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vror_vx_u32m8_tu 369 // CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 370 // CHECK-RV64-NEXT: entry: 371 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], i64 [[RS1]], i64 [[VL]]) 372 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] 373 // 374 vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { 375 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 376 } 377 378 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vror_vv_u64m1_tu 379 // CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 380 // CHECK-RV64-NEXT: entry: 381 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]]) 382 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] 383 // 384 vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { 385 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 386 } 387 388 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vror_vx_u64m1_tu 389 // CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 390 // CHECK-RV64-NEXT: entry: 391 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], i64 [[RS1]], i64 [[VL]]) 392 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] 393 // 394 vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { 395 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 396 } 397 398 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vror_vv_u64m2_tu 399 // CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 400 // CHECK-RV64-NEXT: entry: 401 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]]) 402 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] 403 // 404 vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { 405 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 406 } 407 408 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vror_vx_u64m2_tu 409 // CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 410 // CHECK-RV64-NEXT: entry: 411 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], i64 [[RS1]], i64 [[VL]]) 412 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] 413 // 414 vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { 415 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 416 } 417 418 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vror_vv_u64m4_tu 419 // CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 420 // CHECK-RV64-NEXT: entry: 421 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]]) 422 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] 423 // 424 vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { 425 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 426 } 427 428 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vror_vx_u64m4_tu 429 // CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 430 // CHECK-RV64-NEXT: entry: 431 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], i64 [[RS1]], i64 [[VL]]) 432 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] 433 // 434 vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { 435 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 436 } 437 438 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vror_vv_u64m8_tu 439 // CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 440 // CHECK-RV64-NEXT: entry: 441 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]]) 442 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] 443 // 444 vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { 445 return __riscv_vror_tu(maskedoff, vs2, vs1, vl); 446 } 447 448 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vror_vx_u64m8_tu 449 // CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 450 // CHECK-RV64-NEXT: entry: 451 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], i64 [[RS1]], i64 [[VL]]) 452 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] 453 // 454 vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { 455 return __riscv_vror_tu(maskedoff, vs2, rs1, vl); 456 } 457 458 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vror_vv_u8mf8_tum 459 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 460 // CHECK-RV64-NEXT: entry: 461 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2) 462 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] 463 // 464 vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { 465 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 466 } 467 468 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vror_vx_u8mf8_tum 469 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 470 // CHECK-RV64-NEXT: entry: 471 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2) 472 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] 473 // 474 vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { 475 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 476 } 477 478 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vror_vv_u8mf4_tum 479 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 480 // CHECK-RV64-NEXT: entry: 481 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2) 482 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] 483 // 484 vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { 485 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 486 } 487 488 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vror_vx_u8mf4_tum 489 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 490 // CHECK-RV64-NEXT: entry: 491 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2) 492 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] 493 // 494 vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { 495 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 496 } 497 498 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vror_vv_u8mf2_tum 499 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 500 // CHECK-RV64-NEXT: entry: 501 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2) 502 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] 503 // 504 vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { 505 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 506 } 507 508 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vror_vx_u8mf2_tum 509 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 510 // CHECK-RV64-NEXT: entry: 511 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2) 512 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] 513 // 514 vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { 515 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 516 } 517 518 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vror_vv_u8m1_tum 519 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 520 // CHECK-RV64-NEXT: entry: 521 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2) 522 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]] 523 // 524 vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { 525 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 526 } 527 528 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vror_vx_u8m1_tum 529 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 530 // CHECK-RV64-NEXT: entry: 531 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2) 532 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]] 533 // 534 vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { 535 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 536 } 537 538 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vror_vv_u8m2_tum 539 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 540 // CHECK-RV64-NEXT: entry: 541 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2) 542 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]] 543 // 544 vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { 545 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 546 } 547 548 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vror_vx_u8m2_tum 549 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 550 // CHECK-RV64-NEXT: entry: 551 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], i64 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2) 552 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]] 553 // 554 vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { 555 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 556 } 557 558 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vror_vv_u8m4_tum 559 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 560 // CHECK-RV64-NEXT: entry: 561 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2) 562 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]] 563 // 564 vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { 565 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 566 } 567 568 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vror_vx_u8m4_tum 569 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 570 // CHECK-RV64-NEXT: entry: 571 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], i64 [[RS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2) 572 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]] 573 // 574 vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { 575 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 576 } 577 578 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vror_vv_u8m8_tum 579 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 580 // CHECK-RV64-NEXT: entry: 581 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2) 582 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]] 583 // 584 vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { 585 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 586 } 587 588 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vror_vx_u8m8_tum 589 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 590 // CHECK-RV64-NEXT: entry: 591 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], i64 [[RS1]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2) 592 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]] 593 // 594 vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { 595 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 596 } 597 598 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vror_vv_u16mf4_tum 599 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 600 // CHECK-RV64-NEXT: entry: 601 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2) 602 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] 603 // 604 vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { 605 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 606 } 607 608 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vror_vx_u16mf4_tum 609 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 610 // CHECK-RV64-NEXT: entry: 611 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2) 612 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] 613 // 614 vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { 615 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 616 } 617 618 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vror_vv_u16mf2_tum 619 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 620 // CHECK-RV64-NEXT: entry: 621 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2) 622 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] 623 // 624 vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { 625 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 626 } 627 628 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vror_vx_u16mf2_tum 629 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 630 // CHECK-RV64-NEXT: entry: 631 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2) 632 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] 633 // 634 vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { 635 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 636 } 637 638 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vror_vv_u16m1_tum 639 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 640 // CHECK-RV64-NEXT: entry: 641 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2) 642 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] 643 // 644 vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { 645 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 646 } 647 648 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vror_vx_u16m1_tum 649 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 650 // CHECK-RV64-NEXT: entry: 651 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2) 652 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] 653 // 654 vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { 655 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 656 } 657 658 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vror_vv_u16m2_tum 659 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 660 // CHECK-RV64-NEXT: entry: 661 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2) 662 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] 663 // 664 vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { 665 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 666 } 667 668 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vror_vx_u16m2_tum 669 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 670 // CHECK-RV64-NEXT: entry: 671 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2) 672 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] 673 // 674 vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { 675 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 676 } 677 678 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vror_vv_u16m4_tum 679 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 680 // CHECK-RV64-NEXT: entry: 681 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2) 682 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] 683 // 684 vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { 685 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 686 } 687 688 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vror_vx_u16m4_tum 689 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 690 // CHECK-RV64-NEXT: entry: 691 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], i64 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2) 692 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] 693 // 694 vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { 695 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 696 } 697 698 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vror_vv_u16m8_tum 699 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 700 // CHECK-RV64-NEXT: entry: 701 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2) 702 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] 703 // 704 vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { 705 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 706 } 707 708 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vror_vx_u16m8_tum 709 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 710 // CHECK-RV64-NEXT: entry: 711 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], i64 [[RS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2) 712 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] 713 // 714 vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { 715 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 716 } 717 718 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vror_vv_u32mf2_tum 719 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 720 // CHECK-RV64-NEXT: entry: 721 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2) 722 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] 723 // 724 vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { 725 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 726 } 727 728 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vror_vx_u32mf2_tum 729 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 730 // CHECK-RV64-NEXT: entry: 731 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2) 732 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] 733 // 734 vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { 735 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 736 } 737 738 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vror_vv_u32m1_tum 739 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 740 // CHECK-RV64-NEXT: entry: 741 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2) 742 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] 743 // 744 vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { 745 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 746 } 747 748 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vror_vx_u32m1_tum 749 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 750 // CHECK-RV64-NEXT: entry: 751 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2) 752 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] 753 // 754 vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { 755 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 756 } 757 758 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vror_vv_u32m2_tum 759 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 760 // CHECK-RV64-NEXT: entry: 761 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2) 762 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] 763 // 764 vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { 765 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 766 } 767 768 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vror_vx_u32m2_tum 769 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 770 // CHECK-RV64-NEXT: entry: 771 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2) 772 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] 773 // 774 vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { 775 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 776 } 777 778 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vror_vv_u32m4_tum 779 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 780 // CHECK-RV64-NEXT: entry: 781 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2) 782 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] 783 // 784 vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { 785 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 786 } 787 788 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vror_vx_u32m4_tum 789 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 790 // CHECK-RV64-NEXT: entry: 791 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2) 792 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] 793 // 794 vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { 795 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 796 } 797 798 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vror_vv_u32m8_tum 799 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 800 // CHECK-RV64-NEXT: entry: 801 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2) 802 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] 803 // 804 vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { 805 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 806 } 807 808 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vror_vx_u32m8_tum 809 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 810 // CHECK-RV64-NEXT: entry: 811 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], i64 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2) 812 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] 813 // 814 vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { 815 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 816 } 817 818 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vror_vv_u64m1_tum 819 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 820 // CHECK-RV64-NEXT: entry: 821 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2) 822 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] 823 // 824 vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { 825 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 826 } 827 828 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vror_vx_u64m1_tum 829 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 830 // CHECK-RV64-NEXT: entry: 831 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2) 832 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] 833 // 834 vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { 835 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 836 } 837 838 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vror_vv_u64m2_tum 839 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 840 // CHECK-RV64-NEXT: entry: 841 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2) 842 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] 843 // 844 vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { 845 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 846 } 847 848 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vror_vx_u64m2_tum 849 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 850 // CHECK-RV64-NEXT: entry: 851 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2) 852 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] 853 // 854 vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { 855 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 856 } 857 858 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vror_vv_u64m4_tum 859 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 860 // CHECK-RV64-NEXT: entry: 861 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2) 862 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] 863 // 864 vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { 865 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 866 } 867 868 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vror_vx_u64m4_tum 869 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 870 // CHECK-RV64-NEXT: entry: 871 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2) 872 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] 873 // 874 vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { 875 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 876 } 877 878 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vror_vv_u64m8_tum 879 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 880 // CHECK-RV64-NEXT: entry: 881 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2) 882 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] 883 // 884 vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { 885 return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); 886 } 887 888 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vror_vx_u64m8_tum 889 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 890 // CHECK-RV64-NEXT: entry: 891 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2) 892 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] 893 // 894 vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { 895 return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); 896 } 897 898 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vror_vv_u8mf8_tumu 899 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 900 // CHECK-RV64-NEXT: entry: 901 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0) 902 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] 903 // 904 vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { 905 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 906 } 907 908 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vror_vx_u8mf8_tumu 909 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 910 // CHECK-RV64-NEXT: entry: 911 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0) 912 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] 913 // 914 vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { 915 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 916 } 917 918 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vror_vv_u8mf4_tumu 919 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 920 // CHECK-RV64-NEXT: entry: 921 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0) 922 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] 923 // 924 vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { 925 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 926 } 927 928 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vror_vx_u8mf4_tumu 929 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 930 // CHECK-RV64-NEXT: entry: 931 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0) 932 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] 933 // 934 vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { 935 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 936 } 937 938 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vror_vv_u8mf2_tumu 939 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 940 // CHECK-RV64-NEXT: entry: 941 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0) 942 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] 943 // 944 vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { 945 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 946 } 947 948 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vror_vx_u8mf2_tumu 949 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 950 // CHECK-RV64-NEXT: entry: 951 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0) 952 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] 953 // 954 vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { 955 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 956 } 957 958 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vror_vv_u8m1_tumu 959 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 960 // CHECK-RV64-NEXT: entry: 961 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0) 962 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]] 963 // 964 vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { 965 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 966 } 967 968 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vror_vx_u8m1_tumu 969 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 970 // CHECK-RV64-NEXT: entry: 971 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0) 972 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]] 973 // 974 vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { 975 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 976 } 977 978 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vror_vv_u8m2_tumu 979 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 980 // CHECK-RV64-NEXT: entry: 981 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0) 982 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]] 983 // 984 vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { 985 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 986 } 987 988 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vror_vx_u8m2_tumu 989 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 990 // CHECK-RV64-NEXT: entry: 991 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], i64 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0) 992 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]] 993 // 994 vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { 995 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 996 } 997 998 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vror_vv_u8m4_tumu 999 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1000 // CHECK-RV64-NEXT: entry: 1001 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0) 1002 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]] 1003 // 1004 vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { 1005 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1006 } 1007 1008 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vror_vx_u8m4_tumu 1009 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1010 // CHECK-RV64-NEXT: entry: 1011 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], i64 [[RS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0) 1012 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]] 1013 // 1014 vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { 1015 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1016 } 1017 1018 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vror_vv_u8m8_tumu 1019 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1020 // CHECK-RV64-NEXT: entry: 1021 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0) 1022 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]] 1023 // 1024 vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { 1025 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1026 } 1027 1028 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vror_vx_u8m8_tumu 1029 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1030 // CHECK-RV64-NEXT: entry: 1031 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], i64 [[RS1]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0) 1032 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]] 1033 // 1034 vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { 1035 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1036 } 1037 1038 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vror_vv_u16mf4_tumu 1039 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1040 // CHECK-RV64-NEXT: entry: 1041 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0) 1042 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] 1043 // 1044 vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { 1045 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1046 } 1047 1048 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vror_vx_u16mf4_tumu 1049 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1050 // CHECK-RV64-NEXT: entry: 1051 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0) 1052 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] 1053 // 1054 vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { 1055 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1056 } 1057 1058 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vror_vv_u16mf2_tumu 1059 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1060 // CHECK-RV64-NEXT: entry: 1061 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0) 1062 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] 1063 // 1064 vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { 1065 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1066 } 1067 1068 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vror_vx_u16mf2_tumu 1069 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1070 // CHECK-RV64-NEXT: entry: 1071 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0) 1072 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] 1073 // 1074 vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { 1075 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1076 } 1077 1078 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vror_vv_u16m1_tumu 1079 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1080 // CHECK-RV64-NEXT: entry: 1081 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0) 1082 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] 1083 // 1084 vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { 1085 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1086 } 1087 1088 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vror_vx_u16m1_tumu 1089 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1090 // CHECK-RV64-NEXT: entry: 1091 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0) 1092 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] 1093 // 1094 vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { 1095 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1096 } 1097 1098 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vror_vv_u16m2_tumu 1099 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1100 // CHECK-RV64-NEXT: entry: 1101 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0) 1102 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] 1103 // 1104 vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { 1105 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1106 } 1107 1108 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vror_vx_u16m2_tumu 1109 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1110 // CHECK-RV64-NEXT: entry: 1111 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0) 1112 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] 1113 // 1114 vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { 1115 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1116 } 1117 1118 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vror_vv_u16m4_tumu 1119 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1120 // CHECK-RV64-NEXT: entry: 1121 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0) 1122 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] 1123 // 1124 vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { 1125 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1126 } 1127 1128 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vror_vx_u16m4_tumu 1129 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1130 // CHECK-RV64-NEXT: entry: 1131 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], i64 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0) 1132 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] 1133 // 1134 vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { 1135 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1136 } 1137 1138 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vror_vv_u16m8_tumu 1139 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1140 // CHECK-RV64-NEXT: entry: 1141 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0) 1142 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] 1143 // 1144 vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { 1145 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1146 } 1147 1148 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vror_vx_u16m8_tumu 1149 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1150 // CHECK-RV64-NEXT: entry: 1151 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], i64 [[RS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0) 1152 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] 1153 // 1154 vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { 1155 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1156 } 1157 1158 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vror_vv_u32mf2_tumu 1159 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1160 // CHECK-RV64-NEXT: entry: 1161 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0) 1162 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] 1163 // 1164 vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { 1165 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1166 } 1167 1168 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vror_vx_u32mf2_tumu 1169 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1170 // CHECK-RV64-NEXT: entry: 1171 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0) 1172 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] 1173 // 1174 vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { 1175 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1176 } 1177 1178 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vror_vv_u32m1_tumu 1179 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1180 // CHECK-RV64-NEXT: entry: 1181 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0) 1182 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] 1183 // 1184 vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { 1185 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1186 } 1187 1188 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vror_vx_u32m1_tumu 1189 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1190 // CHECK-RV64-NEXT: entry: 1191 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0) 1192 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] 1193 // 1194 vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { 1195 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1196 } 1197 1198 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vror_vv_u32m2_tumu 1199 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1200 // CHECK-RV64-NEXT: entry: 1201 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0) 1202 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] 1203 // 1204 vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { 1205 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1206 } 1207 1208 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vror_vx_u32m2_tumu 1209 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1210 // CHECK-RV64-NEXT: entry: 1211 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0) 1212 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] 1213 // 1214 vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { 1215 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1216 } 1217 1218 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vror_vv_u32m4_tumu 1219 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1220 // CHECK-RV64-NEXT: entry: 1221 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0) 1222 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] 1223 // 1224 vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { 1225 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1226 } 1227 1228 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vror_vx_u32m4_tumu 1229 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1230 // CHECK-RV64-NEXT: entry: 1231 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0) 1232 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] 1233 // 1234 vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { 1235 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1236 } 1237 1238 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vror_vv_u32m8_tumu 1239 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1240 // CHECK-RV64-NEXT: entry: 1241 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0) 1242 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] 1243 // 1244 vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { 1245 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1246 } 1247 1248 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vror_vx_u32m8_tumu 1249 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1250 // CHECK-RV64-NEXT: entry: 1251 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], i64 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0) 1252 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] 1253 // 1254 vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { 1255 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1256 } 1257 1258 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vror_vv_u64m1_tumu 1259 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1260 // CHECK-RV64-NEXT: entry: 1261 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0) 1262 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] 1263 // 1264 vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { 1265 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1266 } 1267 1268 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vror_vx_u64m1_tumu 1269 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1270 // CHECK-RV64-NEXT: entry: 1271 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0) 1272 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] 1273 // 1274 vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { 1275 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1276 } 1277 1278 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vror_vv_u64m2_tumu 1279 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1280 // CHECK-RV64-NEXT: entry: 1281 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0) 1282 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] 1283 // 1284 vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { 1285 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1286 } 1287 1288 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vror_vx_u64m2_tumu 1289 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1290 // CHECK-RV64-NEXT: entry: 1291 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0) 1292 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] 1293 // 1294 vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { 1295 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1296 } 1297 1298 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vror_vv_u64m4_tumu 1299 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1300 // CHECK-RV64-NEXT: entry: 1301 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0) 1302 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] 1303 // 1304 vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { 1305 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1306 } 1307 1308 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vror_vx_u64m4_tumu 1309 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1310 // CHECK-RV64-NEXT: entry: 1311 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0) 1312 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] 1313 // 1314 vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { 1315 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1316 } 1317 1318 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vror_vv_u64m8_tumu 1319 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1320 // CHECK-RV64-NEXT: entry: 1321 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0) 1322 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] 1323 // 1324 vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { 1325 return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); 1326 } 1327 1328 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vror_vx_u64m8_tumu 1329 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1330 // CHECK-RV64-NEXT: entry: 1331 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0) 1332 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] 1333 // 1334 vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { 1335 return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); 1336 } 1337 1338 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vror_vv_u8mf8_mu 1339 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1340 // CHECK-RV64-NEXT: entry: 1341 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1) 1342 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] 1343 // 1344 vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { 1345 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1346 } 1347 1348 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vror_vx_u8mf8_mu 1349 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1350 // CHECK-RV64-NEXT: entry: 1351 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1) 1352 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] 1353 // 1354 vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { 1355 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1356 } 1357 1358 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vror_vv_u8mf4_mu 1359 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1360 // CHECK-RV64-NEXT: entry: 1361 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1) 1362 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] 1363 // 1364 vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { 1365 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1366 } 1367 1368 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vror_vx_u8mf4_mu 1369 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1370 // CHECK-RV64-NEXT: entry: 1371 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1) 1372 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] 1373 // 1374 vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { 1375 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1376 } 1377 1378 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vror_vv_u8mf2_mu 1379 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1380 // CHECK-RV64-NEXT: entry: 1381 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1) 1382 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] 1383 // 1384 vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { 1385 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1386 } 1387 1388 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vror_vx_u8mf2_mu 1389 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1390 // CHECK-RV64-NEXT: entry: 1391 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1) 1392 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] 1393 // 1394 vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { 1395 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1396 } 1397 1398 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vror_vv_u8m1_mu 1399 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1400 // CHECK-RV64-NEXT: entry: 1401 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1) 1402 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]] 1403 // 1404 vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { 1405 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1406 } 1407 1408 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vror_vx_u8m1_mu 1409 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1410 // CHECK-RV64-NEXT: entry: 1411 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1) 1412 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]] 1413 // 1414 vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { 1415 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1416 } 1417 1418 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vror_vv_u8m2_mu 1419 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1420 // CHECK-RV64-NEXT: entry: 1421 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1) 1422 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]] 1423 // 1424 vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { 1425 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1426 } 1427 1428 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vror_vx_u8m2_mu 1429 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1430 // CHECK-RV64-NEXT: entry: 1431 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], i64 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1) 1432 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]] 1433 // 1434 vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { 1435 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1436 } 1437 1438 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vror_vv_u8m4_mu 1439 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1440 // CHECK-RV64-NEXT: entry: 1441 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1) 1442 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]] 1443 // 1444 vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { 1445 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1446 } 1447 1448 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vror_vx_u8m4_mu 1449 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1450 // CHECK-RV64-NEXT: entry: 1451 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], i64 [[RS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1) 1452 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]] 1453 // 1454 vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { 1455 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1456 } 1457 1458 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vror_vv_u8m8_mu 1459 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1460 // CHECK-RV64-NEXT: entry: 1461 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1) 1462 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]] 1463 // 1464 vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { 1465 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1466 } 1467 1468 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vror_vx_u8m8_mu 1469 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1470 // CHECK-RV64-NEXT: entry: 1471 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], i64 [[RS1]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1) 1472 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]] 1473 // 1474 vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { 1475 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1476 } 1477 1478 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vror_vv_u16mf4_mu 1479 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1480 // CHECK-RV64-NEXT: entry: 1481 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1) 1482 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] 1483 // 1484 vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { 1485 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1486 } 1487 1488 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vror_vx_u16mf4_mu 1489 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1490 // CHECK-RV64-NEXT: entry: 1491 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1) 1492 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] 1493 // 1494 vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { 1495 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1496 } 1497 1498 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vror_vv_u16mf2_mu 1499 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1500 // CHECK-RV64-NEXT: entry: 1501 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1) 1502 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] 1503 // 1504 vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { 1505 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1506 } 1507 1508 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vror_vx_u16mf2_mu 1509 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1510 // CHECK-RV64-NEXT: entry: 1511 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1) 1512 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] 1513 // 1514 vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { 1515 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1516 } 1517 1518 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vror_vv_u16m1_mu 1519 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1520 // CHECK-RV64-NEXT: entry: 1521 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1) 1522 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] 1523 // 1524 vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { 1525 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1526 } 1527 1528 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vror_vx_u16m1_mu 1529 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1530 // CHECK-RV64-NEXT: entry: 1531 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1) 1532 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] 1533 // 1534 vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { 1535 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1536 } 1537 1538 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vror_vv_u16m2_mu 1539 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1540 // CHECK-RV64-NEXT: entry: 1541 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1) 1542 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] 1543 // 1544 vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { 1545 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1546 } 1547 1548 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vror_vx_u16m2_mu 1549 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1550 // CHECK-RV64-NEXT: entry: 1551 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1) 1552 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] 1553 // 1554 vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { 1555 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1556 } 1557 1558 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vror_vv_u16m4_mu 1559 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1560 // CHECK-RV64-NEXT: entry: 1561 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1) 1562 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] 1563 // 1564 vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { 1565 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1566 } 1567 1568 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vror_vx_u16m4_mu 1569 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1570 // CHECK-RV64-NEXT: entry: 1571 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], i64 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1) 1572 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] 1573 // 1574 vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { 1575 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1576 } 1577 1578 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vror_vv_u16m8_mu 1579 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1580 // CHECK-RV64-NEXT: entry: 1581 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1) 1582 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] 1583 // 1584 vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { 1585 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1586 } 1587 1588 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vror_vx_u16m8_mu 1589 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1590 // CHECK-RV64-NEXT: entry: 1591 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], i64 [[RS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1) 1592 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] 1593 // 1594 vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { 1595 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1596 } 1597 1598 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vror_vv_u32mf2_mu 1599 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1600 // CHECK-RV64-NEXT: entry: 1601 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1) 1602 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] 1603 // 1604 vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { 1605 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1606 } 1607 1608 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vror_vx_u32mf2_mu 1609 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1610 // CHECK-RV64-NEXT: entry: 1611 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1) 1612 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] 1613 // 1614 vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { 1615 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1616 } 1617 1618 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vror_vv_u32m1_mu 1619 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1620 // CHECK-RV64-NEXT: entry: 1621 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1) 1622 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] 1623 // 1624 vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { 1625 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1626 } 1627 1628 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vror_vx_u32m1_mu 1629 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1630 // CHECK-RV64-NEXT: entry: 1631 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1) 1632 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] 1633 // 1634 vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { 1635 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1636 } 1637 1638 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vror_vv_u32m2_mu 1639 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1640 // CHECK-RV64-NEXT: entry: 1641 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1) 1642 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] 1643 // 1644 vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { 1645 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1646 } 1647 1648 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vror_vx_u32m2_mu 1649 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1650 // CHECK-RV64-NEXT: entry: 1651 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1) 1652 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] 1653 // 1654 vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { 1655 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1656 } 1657 1658 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vror_vv_u32m4_mu 1659 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1660 // CHECK-RV64-NEXT: entry: 1661 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1) 1662 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] 1663 // 1664 vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { 1665 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1666 } 1667 1668 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vror_vx_u32m4_mu 1669 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1670 // CHECK-RV64-NEXT: entry: 1671 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1) 1672 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] 1673 // 1674 vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { 1675 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1676 } 1677 1678 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vror_vv_u32m8_mu 1679 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1680 // CHECK-RV64-NEXT: entry: 1681 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1) 1682 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] 1683 // 1684 vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { 1685 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1686 } 1687 1688 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vror_vx_u32m8_mu 1689 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1690 // CHECK-RV64-NEXT: entry: 1691 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], i64 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1) 1692 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] 1693 // 1694 vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { 1695 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1696 } 1697 1698 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vror_vv_u64m1_mu 1699 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1700 // CHECK-RV64-NEXT: entry: 1701 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1) 1702 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] 1703 // 1704 vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { 1705 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1706 } 1707 1708 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vror_vx_u64m1_mu 1709 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1710 // CHECK-RV64-NEXT: entry: 1711 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1) 1712 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] 1713 // 1714 vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { 1715 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1716 } 1717 1718 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vror_vv_u64m2_mu 1719 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1720 // CHECK-RV64-NEXT: entry: 1721 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1) 1722 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] 1723 // 1724 vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { 1725 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1726 } 1727 1728 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vror_vx_u64m2_mu 1729 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1730 // CHECK-RV64-NEXT: entry: 1731 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1) 1732 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] 1733 // 1734 vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { 1735 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1736 } 1737 1738 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vror_vv_u64m4_mu 1739 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1740 // CHECK-RV64-NEXT: entry: 1741 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1) 1742 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] 1743 // 1744 vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { 1745 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1746 } 1747 1748 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vror_vx_u64m4_mu 1749 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1750 // CHECK-RV64-NEXT: entry: 1751 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1) 1752 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] 1753 // 1754 vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { 1755 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1756 } 1757 1758 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vror_vv_u64m8_mu 1759 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1760 // CHECK-RV64-NEXT: entry: 1761 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1) 1762 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] 1763 // 1764 vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { 1765 return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); 1766 } 1767 1768 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vror_vx_u64m8_mu 1769 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 1770 // CHECK-RV64-NEXT: entry: 1771 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1) 1772 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] 1773 // 1774 vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { 1775 return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); 1776 } 1777 1778