; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
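
; llvm.riscv.vsse is the strided-store intrinsic: its operands are the value
; to store, the base pointer, the byte stride, and the VL; the .mask variants
; take an extra <vscale x N x i1> mask operand before the VL. The RUN lines
; instantiate iXLen via sed as i32 (rv32) or i64 (rv64) before the IR reaches
; llc, e.g. 'iXLen %2' becomes 'i32 %2' under the first RUN line, so one file
; covers both XLENs.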

declare void @llvm.riscv.vsse.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1i64(
    <vscale x 1 x i64> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}
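
; An all-ones mask should fold to the unmasked form: the test below passes
; splat (i1 true) to the .mask intrinsic, and the CHECK lines expect plain
; vsse64.v with no v0.t operand.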

define void @intrinsic_vsse_allonesmask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_allonesmask_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> splat (i1 true),
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2i64(
  <vscale x 2 x i64>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2i64(
    <vscale x 2 x i64> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2i64(
  <vscale x 2 x i64>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4i64(
  <vscale x 4 x i64>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4i64(
    <vscale x 4 x i64> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4i64(
  <vscale x 4 x i64>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8i64(
  <vscale x 8 x i64>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8i64(
    <vscale x 8 x i64> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8i64(
  <vscale x 8 x i64>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1f64(
  <vscale x 1 x double>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1f64(
    <vscale x 1 x double> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1f64(
  <vscale x 1 x double>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1f64(
    <vscale x 1 x double> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2f64(
  <vscale x 2 x double>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2f64(
    <vscale x 2 x double> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2f64(
  <vscale x 2 x double>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2f64(
    <vscale x 2 x double> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4f64(
  <vscale x 4 x double>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4f64(
    <vscale x 4 x double> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4f64(
  <vscale x 4 x double>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4f64(
    <vscale x 4 x double> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8f64(
  <vscale x 8 x double>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8f64(
    <vscale x 8 x double> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8f64(
  <vscale x 8 x double>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8f64(
    <vscale x 8 x double> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1i32(
    <vscale x 1 x i32> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2i32(
    <vscale x 2 x i32> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4i32(
    <vscale x 4 x i32> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8i32(
    <vscale x 8 x i32> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16i32(
    <vscale x 16 x i32> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  iXLen,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    ptr %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1f32(
  <vscale x 1 x float>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1f32(
    <vscale x 1 x float> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1f32(
  <vscale x 1 x float>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1f32(
    <vscale x 1 x float> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2f32(
  <vscale x 2 x float>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2f32(
    <vscale x 2 x float> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2f32(
  <vscale x 2 x float>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2f32(
    <vscale x 2 x float> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4f32(
  <vscale x 4 x float>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4f32(
    <vscale x 4 x float> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4f32(
  <vscale x 4 x float>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4f32(
    <vscale x 4 x float> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8f32(
  <vscale x 8 x float>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8f32(
    <vscale x 8 x float> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8f32(
  <vscale x 8 x float>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8f32(
    <vscale x 8 x float> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv16f32(
  <vscale x 16 x float>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16f32(
    <vscale x 16 x float> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16f32(
  <vscale x 16 x float>,
  ptr,
  iXLen,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16f32(
    <vscale x 16 x float> %0,
    ptr %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1i16(
  <vscale x 1 x i16>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1i16(
    <vscale x 1 x i16> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i16(
  <vscale x 1 x i16>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2i16(
    <vscale x 2 x i16> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4i16(
    <vscale x 4 x i16> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8i16(
    <vscale x 8 x i16> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16i16(
    <vscale x 16 x i16> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  iXLen,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    ptr %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv32i16(
    <vscale x 32 x i16> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  iXLen,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    ptr %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1f16(
  <vscale x 1 x half>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1f16(
    <vscale x 1 x half> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1f16(
  <vscale x 1 x half>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1f16(
    <vscale x 1 x half> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2f16(
  <vscale x 2 x half>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2f16(
    <vscale x 2 x half> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2f16(
  <vscale x 2 x half>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2f16(
    <vscale x 2 x half> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4f16(
  <vscale x 4 x half>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4f16(
    <vscale x 4 x half> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4f16(
  <vscale x 4 x half>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4f16(
    <vscale x 4 x half> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8f16(
  <vscale x 8 x half>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8f16(
    <vscale x 8 x half> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8f16(
  <vscale x 8 x half>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8f16(
    <vscale x 8 x half> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv16f16(
  <vscale x 16 x half>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16f16(
    <vscale x 16 x half> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16f16(
  <vscale x 16 x half>,
  ptr,
  iXLen,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16f16(
    <vscale x 16 x half> %0,
    ptr %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv32f16(
  <vscale x 32 x half>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv32f16(
    <vscale x 32 x half> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv32f16(
  <vscale x 32 x half>,
  ptr,
  iXLen,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv32f16(
    <vscale x 32 x half> %0,
    ptr %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1bf16(
  <vscale x 1 x bfloat>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1bf16(
    <vscale x 1 x bfloat> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1bf16(
  <vscale x 1 x bfloat>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1bf16(
    <vscale x 1 x bfloat> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2bf16(
  <vscale x 2 x bfloat>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2bf16(
    <vscale x 2 x bfloat> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2bf16(
  <vscale x 2 x bfloat>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2bf16(
    <vscale x 2 x bfloat> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4bf16(
  <vscale x 4 x bfloat>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4bf16(
    <vscale x 4 x bfloat> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4bf16(
  <vscale x 4 x bfloat>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4bf16(
    <vscale x 4 x bfloat> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8bf16(
  <vscale x 8 x bfloat>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8bf16(
    <vscale x 8 x bfloat> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8bf16(
  <vscale x 8 x bfloat>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8bf16(
    <vscale x 8 x bfloat> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv16bf16(
  <vscale x 16 x bfloat>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16bf16(
    <vscale x 16 x bfloat> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16bf16(
  <vscale x 16 x bfloat>,
  ptr,
  iXLen,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16bf16(
    <vscale x 16 x bfloat> %0,
    ptr %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv32bf16(
  <vscale x 32 x bfloat>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv32bf16(
    <vscale x 32 x bfloat> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv32bf16(
  <vscale x 32 x bfloat>,
  ptr,
  iXLen,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv32bf16(
    <vscale x 32 x bfloat> %0,
    ptr %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2i8(
    <vscale x 2 x i8> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4i8(
    <vscale x 4 x i8> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8i8(
    <vscale x 8 x i8> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16i8(
    <vscale x 16 x i8> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  iXLen,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv32i8(
    <vscale x 32 x i8> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  iXLen,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  iXLen,
  iXLen);

define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv64i8(
    <vscale x 64 x i8> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  iXLen,
  <vscale x 64 x i1>,
  iXLen);

define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 64 x i1> %3,
    iXLen %4)

  ret void
}