; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v12, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen 9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v12, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v12, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v12, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x half> %a
}

define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 1 x half> %a
}

define <vscale x 1 x half> @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x half> %a
}

define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 2 x half> %a
}

define <vscale x 2 x half> @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x half> %a
}

define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 4 x half> %a
}

define <vscale x 4 x half> @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x half> %a
}

define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 8 x half> %a
}

define <vscale x 8 x half> @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
{ 1843; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16: 1844; CHECK: # %bb.0: # %entry 1845; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu 1846; CHECK-NEXT: vslideup.vx v8, v12, a0, v0.t 1847; CHECK-NEXT: ret 1848entry: 1849 %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16( 1850 <vscale x 16 x half> %0, 1851 <vscale x 16 x half> %1, 1852 iXLen %2, 1853 <vscale x 16 x i1> %3, 1854 iXLen %4, iXLen 0) 1855 1856 ret <vscale x 16 x half> %a 1857} 1858 1859define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind { 1860; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16: 1861; CHECK: # %bb.0: # %entry 1862; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma 1863; CHECK-NEXT: vslideup.vi v8, v12, 9 1864; CHECK-NEXT: ret 1865entry: 1866 %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16( 1867 <vscale x 16 x half> %0, 1868 <vscale x 16 x half> %1, 1869 iXLen 9, 1870 iXLen %2, 1871 iXLen 1) 1872 1873 ret <vscale x 16 x half> %a 1874} 1875 1876define <vscale x 16 x half> @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind { 1877; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16: 1878; CHECK: # %bb.0: # %entry 1879; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu 1880; CHECK-NEXT: vslideup.vi v8, v12, 9, v0.t 1881; CHECK-NEXT: ret 1882entry: 1883 %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16( 1884 <vscale x 16 x half> %0, 1885 <vscale x 16 x half> %1, 1886 iXLen 9, 1887 <vscale x 16 x i1> %2, 1888 iXLen %3, iXLen 0) 1889 1890 ret <vscale x 16 x half> %a 1891} 1892 1893declare <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32( 1894 <vscale x 1 x float>, 1895 <vscale x 1 x float>, 1896 iXLen, 1897 iXLen, 1898 iXLen); 1899 1900define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2, iXLen %3) nounwind { 1901; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32: 1902; CHECK: # %bb.0: # %entry 1903; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 1904; CHECK-NEXT: vslideup.vx v8, v9, a0 1905; CHECK-NEXT: ret 1906entry: 1907 %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32( 1908 <vscale x 1 x float> %0, 1909 <vscale x 1 x float> %1, 1910 iXLen %2, 1911 iXLen %3, 1912 iXLen 1) 1913 1914 ret <vscale x 1 x float> %a 1915} 1916 1917declare <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32( 1918 <vscale x 1 x float>, 1919 <vscale x 1 x float>, 1920 iXLen, 1921 <vscale x 1 x i1>, 1922 iXLen, iXLen); 1923 1924define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 1925; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32: 1926; CHECK: # %bb.0: # %entry 1927; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 1928; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t 1929; CHECK-NEXT: ret 1930entry: 1931 %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32( 1932 <vscale x 1 x float> %0, 1933 <vscale x 1 x float> %1, 1934 iXLen %2, 1935 <vscale x 1 x i1> %3, 1936 iXLen %4, iXLen 0) 1937 1938 ret <vscale x 1 x float> %a 1939} 1940 1941define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind { 1942; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32: 1943; CHECK: # %bb.0: # %entry 1944; CHECK-NEXT: vsetvli zero, a0, 
e32, mf2, ta, ma 1945; CHECK-NEXT: vslideup.vi v8, v9, 9 1946; CHECK-NEXT: ret 1947entry: 1948 %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32( 1949 <vscale x 1 x float> %0, 1950 <vscale x 1 x float> %1, 1951 iXLen 9, 1952 iXLen %2, 1953 iXLen 1) 1954 1955 ret <vscale x 1 x float> %a 1956} 1957 1958define <vscale x 1 x float> @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind { 1959; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32: 1960; CHECK: # %bb.0: # %entry 1961; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu 1962; CHECK-NEXT: vslideup.vi v8, v9, 9, v0.t 1963; CHECK-NEXT: ret 1964entry: 1965 %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32( 1966 <vscale x 1 x float> %0, 1967 <vscale x 1 x float> %1, 1968 iXLen 9, 1969 <vscale x 1 x i1> %2, 1970 iXLen %3, iXLen 0) 1971 1972 ret <vscale x 1 x float> %a 1973} 1974 1975declare <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32( 1976 <vscale x 2 x float>, 1977 <vscale x 2 x float>, 1978 iXLen, 1979 iXLen, 1980 iXLen); 1981 1982define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2, iXLen %3) nounwind { 1983; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32: 1984; CHECK: # %bb.0: # %entry 1985; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 1986; CHECK-NEXT: vslideup.vx v8, v9, a0 1987; CHECK-NEXT: ret 1988entry: 1989 %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32( 1990 <vscale x 2 x float> %0, 1991 <vscale x 2 x float> %1, 1992 iXLen %2, 1993 iXLen %3, 1994 iXLen 1) 1995 1996 ret <vscale x 2 x float> %a 1997} 1998 1999declare <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32( 2000 <vscale x 2 x float>, 2001 <vscale x 2 x float>, 2002 iXLen, 2003 <vscale x 2 x i1>, 2004 iXLen, iXLen); 2005 2006define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 2007; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32: 2008; CHECK: # %bb.0: # %entry 2009; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu 2010; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t 2011; CHECK-NEXT: ret 2012entry: 2013 %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32( 2014 <vscale x 2 x float> %0, 2015 <vscale x 2 x float> %1, 2016 iXLen %2, 2017 <vscale x 2 x i1> %3, 2018 iXLen %4, iXLen 0) 2019 2020 ret <vscale x 2 x float> %a 2021} 2022 2023define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind { 2024; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32: 2025; CHECK: # %bb.0: # %entry 2026; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma 2027; CHECK-NEXT: vslideup.vi v8, v9, 9 2028; CHECK-NEXT: ret 2029entry: 2030 %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32( 2031 <vscale x 2 x float> %0, 2032 <vscale x 2 x float> %1, 2033 iXLen 9, 2034 iXLen %2, 2035 iXLen 1) 2036 2037 ret <vscale x 2 x float> %a 2038} 2039 2040define <vscale x 2 x float> @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind { 2041; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32: 2042; CHECK: # %bb.0: # %entry 2043; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu 2044; CHECK-NEXT: vslideup.vi v8, v9, 9, v0.t 2045; CHECK-NEXT: ret 2046entry: 2047 %a = call <vscale x 2 x float> 
@llvm.riscv.vslideup.mask.nxv2f32( 2048 <vscale x 2 x float> %0, 2049 <vscale x 2 x float> %1, 2050 iXLen 9, 2051 <vscale x 2 x i1> %2, 2052 iXLen %3, iXLen 0) 2053 2054 ret <vscale x 2 x float> %a 2055} 2056 2057declare <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32( 2058 <vscale x 4 x float>, 2059 <vscale x 4 x float>, 2060 iXLen, 2061 iXLen, 2062 iXLen); 2063 2064define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2, iXLen %3) nounwind { 2065; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32: 2066; CHECK: # %bb.0: # %entry 2067; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 2068; CHECK-NEXT: vslideup.vx v8, v10, a0 2069; CHECK-NEXT: ret 2070entry: 2071 %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32( 2072 <vscale x 4 x float> %0, 2073 <vscale x 4 x float> %1, 2074 iXLen %2, 2075 iXLen %3, 2076 iXLen 1) 2077 2078 ret <vscale x 4 x float> %a 2079} 2080 2081declare <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32( 2082 <vscale x 4 x float>, 2083 <vscale x 4 x float>, 2084 iXLen, 2085 <vscale x 4 x i1>, 2086 iXLen, iXLen); 2087 2088define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 2089; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32: 2090; CHECK: # %bb.0: # %entry 2091; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu 2092; CHECK-NEXT: vslideup.vx v8, v10, a0, v0.t 2093; CHECK-NEXT: ret 2094entry: 2095 %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32( 2096 <vscale x 4 x float> %0, 2097 <vscale x 4 x float> %1, 2098 iXLen %2, 2099 <vscale x 4 x i1> %3, 2100 iXLen %4, iXLen 0) 2101 2102 ret <vscale x 4 x float> %a 2103} 2104 2105define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind { 2106; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32: 2107; CHECK: # %bb.0: # %entry 2108; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma 2109; CHECK-NEXT: vslideup.vi v8, v10, 9 2110; CHECK-NEXT: ret 2111entry: 2112 %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32( 2113 <vscale x 4 x float> %0, 2114 <vscale x 4 x float> %1, 2115 iXLen 9, 2116 iXLen %2, 2117 iXLen 1) 2118 2119 ret <vscale x 4 x float> %a 2120} 2121 2122define <vscale x 4 x float> @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind { 2123; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32: 2124; CHECK: # %bb.0: # %entry 2125; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu 2126; CHECK-NEXT: vslideup.vi v8, v10, 9, v0.t 2127; CHECK-NEXT: ret 2128entry: 2129 %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32( 2130 <vscale x 4 x float> %0, 2131 <vscale x 4 x float> %1, 2132 iXLen 9, 2133 <vscale x 4 x i1> %2, 2134 iXLen %3, iXLen 0) 2135 2136 ret <vscale x 4 x float> %a 2137} 2138 2139declare <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32( 2140 <vscale x 8 x float>, 2141 <vscale x 8 x float>, 2142 iXLen, 2143 iXLen, 2144 iXLen); 2145 2146define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2, iXLen %3) nounwind { 2147; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32: 2148; CHECK: # %bb.0: # %entry 2149; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 2150; CHECK-NEXT: vslideup.vx v8, v12, a0 2151; CHECK-NEXT: ret 2152entry: 2153 %a = call <vscale 
x 8 x float> @llvm.riscv.vslideup.nxv8f32( 2154 <vscale x 8 x float> %0, 2155 <vscale x 8 x float> %1, 2156 iXLen %2, 2157 iXLen %3, 2158 iXLen 1) 2159 2160 ret <vscale x 8 x float> %a 2161} 2162 2163declare <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32( 2164 <vscale x 8 x float>, 2165 <vscale x 8 x float>, 2166 iXLen, 2167 <vscale x 8 x i1>, 2168 iXLen, iXLen); 2169 2170define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 2171; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32: 2172; CHECK: # %bb.0: # %entry 2173; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu 2174; CHECK-NEXT: vslideup.vx v8, v12, a0, v0.t 2175; CHECK-NEXT: ret 2176entry: 2177 %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32( 2178 <vscale x 8 x float> %0, 2179 <vscale x 8 x float> %1, 2180 iXLen %2, 2181 <vscale x 8 x i1> %3, 2182 iXLen %4, iXLen 0) 2183 2184 ret <vscale x 8 x float> %a 2185} 2186 2187define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind { 2188; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32: 2189; CHECK: # %bb.0: # %entry 2190; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma 2191; CHECK-NEXT: vslideup.vi v8, v12, 9 2192; CHECK-NEXT: ret 2193entry: 2194 %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32( 2195 <vscale x 8 x float> %0, 2196 <vscale x 8 x float> %1, 2197 iXLen 9, 2198 iXLen %2, 2199 iXLen 1) 2200 2201 ret <vscale x 8 x float> %a 2202} 2203 2204define <vscale x 8 x float> @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind { 2205; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32: 2206; CHECK: # %bb.0: # %entry 2207; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu 2208; CHECK-NEXT: vslideup.vi v8, v12, 9, v0.t 2209; CHECK-NEXT: ret 2210entry: 2211 %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32( 2212 <vscale x 8 x float> %0, 2213 <vscale x 8 x float> %1, 2214 iXLen 9, 2215 <vscale x 8 x i1> %2, 2216 iXLen %3, iXLen 0) 2217 2218 ret <vscale x 8 x float> %a 2219} 2220 2221declare <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64( 2222 <vscale x 1 x double>, 2223 <vscale x 1 x double>, 2224 iXLen, 2225 iXLen, 2226 iXLen); 2227 2228define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2, iXLen %3) nounwind { 2229; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64: 2230; CHECK: # %bb.0: # %entry 2231; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 2232; CHECK-NEXT: vslideup.vx v8, v9, a0 2233; CHECK-NEXT: ret 2234entry: 2235 %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64( 2236 <vscale x 1 x double> %0, 2237 <vscale x 1 x double> %1, 2238 iXLen %2, 2239 iXLen %3, 2240 iXLen 1) 2241 2242 ret <vscale x 1 x double> %a 2243} 2244 2245declare <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64( 2246 <vscale x 1 x double>, 2247 <vscale x 1 x double>, 2248 iXLen, 2249 <vscale x 1 x i1>, 2250 iXLen, iXLen); 2251 2252define <vscale x 1 x double> @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 2253; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64: 2254; CHECK: # %bb.0: # %entry 2255; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu 2256; CHECK-NEXT: vslideup.vx v8, v9, 
a0, v0.t 2257; CHECK-NEXT: ret 2258entry: 2259 %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64( 2260 <vscale x 1 x double> %0, 2261 <vscale x 1 x double> %1, 2262 iXLen %2, 2263 <vscale x 1 x i1> %3, 2264 iXLen %4, iXLen 0) 2265 2266 ret <vscale x 1 x double> %a 2267} 2268 2269define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind { 2270; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64: 2271; CHECK: # %bb.0: # %entry 2272; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma 2273; CHECK-NEXT: vslideup.vi v8, v9, 9 2274; CHECK-NEXT: ret 2275entry: 2276 %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64( 2277 <vscale x 1 x double> %0, 2278 <vscale x 1 x double> %1, 2279 iXLen 9, 2280 iXLen %2, 2281 iXLen 1) 2282 2283 ret <vscale x 1 x double> %a 2284} 2285 2286define <vscale x 1 x double> @intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind { 2287; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64: 2288; CHECK: # %bb.0: # %entry 2289; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu 2290; CHECK-NEXT: vslideup.vi v8, v9, 9, v0.t 2291; CHECK-NEXT: ret 2292entry: 2293 %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64( 2294 <vscale x 1 x double> %0, 2295 <vscale x 1 x double> %1, 2296 iXLen 9, 2297 <vscale x 1 x i1> %2, 2298 iXLen %3, iXLen 0) 2299 2300 ret <vscale x 1 x double> %a 2301} 2302 2303declare <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64( 2304 <vscale x 2 x double>, 2305 <vscale x 2 x double>, 2306 iXLen, 2307 iXLen, 2308 iXLen); 2309 2310define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2, iXLen %3) nounwind { 2311; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64: 2312; CHECK: # %bb.0: # %entry 2313; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 2314; CHECK-NEXT: vslideup.vx v8, v10, a0 2315; CHECK-NEXT: ret 2316entry: 2317 %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64( 2318 <vscale x 2 x double> %0, 2319 <vscale x 2 x double> %1, 2320 iXLen %2, 2321 iXLen %3, 2322 iXLen 1) 2323 2324 ret <vscale x 2 x double> %a 2325} 2326 2327declare <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64( 2328 <vscale x 2 x double>, 2329 <vscale x 2 x double>, 2330 iXLen, 2331 <vscale x 2 x i1>, 2332 iXLen, iXLen); 2333 2334define <vscale x 2 x double> @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 2335; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64: 2336; CHECK: # %bb.0: # %entry 2337; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu 2338; CHECK-NEXT: vslideup.vx v8, v10, a0, v0.t 2339; CHECK-NEXT: ret 2340entry: 2341 %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64( 2342 <vscale x 2 x double> %0, 2343 <vscale x 2 x double> %1, 2344 iXLen %2, 2345 <vscale x 2 x i1> %3, 2346 iXLen %4, iXLen 0) 2347 2348 ret <vscale x 2 x double> %a 2349} 2350 2351define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind { 2352; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64: 2353; CHECK: # %bb.0: # %entry 2354; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma 2355; CHECK-NEXT: vslideup.vi v8, v10, 9 2356; CHECK-NEXT: ret 2357entry: 2358 %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64( 2359 
<vscale x 2 x double> %0, 2360 <vscale x 2 x double> %1, 2361 iXLen 9, 2362 iXLen %2, 2363 iXLen 1) 2364 2365 ret <vscale x 2 x double> %a 2366} 2367 2368define <vscale x 2 x double> @intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind { 2369; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64: 2370; CHECK: # %bb.0: # %entry 2371; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu 2372; CHECK-NEXT: vslideup.vi v8, v10, 9, v0.t 2373; CHECK-NEXT: ret 2374entry: 2375 %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64( 2376 <vscale x 2 x double> %0, 2377 <vscale x 2 x double> %1, 2378 iXLen 9, 2379 <vscale x 2 x i1> %2, 2380 iXLen %3, iXLen 0) 2381 2382 ret <vscale x 2 x double> %a 2383} 2384 2385declare <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64( 2386 <vscale x 4 x double>, 2387 <vscale x 4 x double>, 2388 iXLen, 2389 iXLen, 2390 iXLen); 2391 2392define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2, iXLen %3) nounwind { 2393; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64: 2394; CHECK: # %bb.0: # %entry 2395; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 2396; CHECK-NEXT: vslideup.vx v8, v12, a0 2397; CHECK-NEXT: ret 2398entry: 2399 %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64( 2400 <vscale x 4 x double> %0, 2401 <vscale x 4 x double> %1, 2402 iXLen %2, 2403 iXLen %3, 2404 iXLen 1) 2405 2406 ret <vscale x 4 x double> %a 2407} 2408 2409declare <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64( 2410 <vscale x 4 x double>, 2411 <vscale x 4 x double>, 2412 iXLen, 2413 <vscale x 4 x i1>, 2414 iXLen, iXLen); 2415 2416define <vscale x 4 x double> @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 2417; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64: 2418; CHECK: # %bb.0: # %entry 2419; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu 2420; CHECK-NEXT: vslideup.vx v8, v12, a0, v0.t 2421; CHECK-NEXT: ret 2422entry: 2423 %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64( 2424 <vscale x 4 x double> %0, 2425 <vscale x 4 x double> %1, 2426 iXLen %2, 2427 <vscale x 4 x i1> %3, 2428 iXLen %4, iXLen 0) 2429 2430 ret <vscale x 4 x double> %a 2431} 2432 2433define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind { 2434; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64: 2435; CHECK: # %bb.0: # %entry 2436; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma 2437; CHECK-NEXT: vslideup.vi v8, v12, 9 2438; CHECK-NEXT: ret 2439entry: 2440 %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64( 2441 <vscale x 4 x double> %0, 2442 <vscale x 4 x double> %1, 2443 iXLen 9, 2444 iXLen %2, 2445 iXLen 1) 2446 2447 ret <vscale x 4 x double> %a 2448} 2449 2450define <vscale x 4 x double> @intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind { 2451; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64: 2452; CHECK: # %bb.0: # %entry 2453; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu 2454; CHECK-NEXT: vslideup.vi v8, v12, 9, v0.t 2455; CHECK-NEXT: ret 2456entry: 2457 %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64( 2458 <vscale x 4 x double> %0, 2459 <vscale x 4 x double> %1, 2460 iXLen 9, 2461 <vscale x 4 x i1> %2, 
2462 iXLen %3, iXLen 0) 2463 2464 ret <vscale x 4 x double> %a 2465} 2466