; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2)

  ret <vscale x 64 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmsbc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

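; The tests below exercise the vector-scalar (vmsbc.vx) form of the
; intrinsic: the second source operand is a scalar passed in a GPR
; rather than a vector register.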
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
  <vscale x 64 x i8>,
  i8,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 64 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
  <vscale x 32 x i16>,
  i16,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
  <vscale x 16 x i32>,
  i32,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

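; For i64 scalar operands on RV32 there is no 64-bit GPR, so the scalar is
; stored to the stack and splatted with a zero-strided load (vlse64.v), and
; the compare is done as vmsbc.vv; on RV64 a plain vmsbc.vx suffices.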
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmsbc.vv v0, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmsbc.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmsbc.vv v0, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmsbc.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmsbc.vv v0, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmsbc.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
  <vscale x 8 x i64>,
  i64,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmsbc.vv v0, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vmsbc.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}