; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vslide1up.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vslide1up.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vslide1up.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vslide1up.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vslide1up.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vslide1up.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vslide1up.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vslide1up.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vslide1up.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; RV32-NEXT:    vslide1up.vx v9, v8, a1
; RV32-NEXT:    vslide1up.vx v8, v9, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vslide1up.vx v9, v8, a0
; RV64-NEXT:    vmv.v.v v8, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}
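
; Note: the i64 cases differ per target, as the RV32/RV64 checks show. On RV32
; the i64 scalar arrives split across two GPRs, so the lowering performs two
; e32 vslide1up steps at doubled VL; for the masked forms below the result is
; built in a temporary register group and then vmerge'd into the passthru
; under the original mask, while RV64 emits a single masked vslide1up.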
declare <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
; RV32-NEXT:    vslide1up.vx v10, v9, a1
; RV32-NEXT:    vslide1up.vx v9, v10, a0
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v9, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m2, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; RV32-NEXT:    vslide1up.vx v10, v8, a1
; RV32-NEXT:    vslide1up.vx v8, v10, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vslide1up.vx v10, v8, a0
; RV64-NEXT:    vmv.v.v v8, v10
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m2, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m2, ta, ma
; RV32-NEXT:    vslide1up.vx v12, v10, a1
; RV32-NEXT:    vslide1up.vx v10, v12, a0
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v10, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vslide1up.vx v8, v10, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m4, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; RV32-NEXT:    vslide1up.vx v12, v8, a1
; RV32-NEXT:    vslide1up.vx v8, v12, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vslide1up.vx v12, v8, a0
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m4, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m4, ta, ma
; RV32-NEXT:    vslide1up.vx v16, v12, a1
; RV32-NEXT:    vslide1up.vx v12, v16, a0
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v12, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vslide1up.vx v8, v12, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m8, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT:    vslide1up.vx v16, v8, a1
; RV32-NEXT:    vslide1up.vx v8, v16, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vslide1up.vx v16, v8, a0
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m8, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT:    vslide1up.vx v24, v16, a1
; RV32-NEXT:    vslide1up.vx v16, v24, a0
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v16, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT:    vslide1up.vx v8, v16, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}