; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
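; The tests below cover the unmasked and masked llvm.riscv.vslide1up intrinsics
; for every supported element width (i8 through i64) and LMUL. vslide1up.vx
; shifts each element of the source vector up by one position and inserts the
; scalar operand at element 0. For i64 elements on RV32 the scalar arrives in
; two GPRs, so codegen is expected to use a doubled VL with a pair of e32
; vslide1up steps (plus a vmerge for the masked form).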

declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vslide1up.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vslide1up.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vslide1up.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vslide1up.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vslide1up.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vslide1up.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vslide1up.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vslide1up.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vslide1up.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; RV32-NEXT:    vslide1up.vx v9, v8, a1
; RV32-NEXT:    vslide1up.vx v8, v9, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vslide1up.vx v9, v8, a0
; RV64-NEXT:    vmv.v.v v8, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
; RV32-NEXT:    vslide1up.vx v10, v9, a1
; RV32-NEXT:    vslide1up.vx v9, v10, a0
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v9, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m2, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; RV32-NEXT:    vslide1up.vx v10, v8, a1
; RV32-NEXT:    vslide1up.vx v8, v10, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vslide1up.vx v10, v8, a0
; RV64-NEXT:    vmv.v.v v8, v10
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m2, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m2, ta, ma
; RV32-NEXT:    vslide1up.vx v12, v10, a1
; RV32-NEXT:    vslide1up.vx v10, v12, a0
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v10, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vslide1up.vx v8, v10, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m4, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; RV32-NEXT:    vslide1up.vx v12, v8, a1
; RV32-NEXT:    vslide1up.vx v8, v12, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vslide1up.vx v12, v8, a0
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m4, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m4, ta, ma
; RV32-NEXT:    vslide1up.vx v16, v12, a1
; RV32-NEXT:    vslide1up.vx v12, v16, a0
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v12, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vslide1up.vx v8, v12, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m8, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT:    vslide1up.vx v16, v8, a1
; RV32-NEXT:    vslide1up.vx v8, v16, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vslide1up.vx v16, v8, a0
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m8, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT:    vslide1up.vx v24, v16, a1
; RV32-NEXT:    vslide1up.vx v16, v24, a0
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v16, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT:    vslide1up.vx v8, v16, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}