; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
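; This file checks the llvm.riscv.vslide1down.* intrinsics (unmasked and
; masked variants) for each integer element width at every supported LMUL.
; The iXLen placeholder is rewritten to i32 or i64 by the sed commands in the
; RUN lines, so the same IR body is verified for both riscv32 and riscv64.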

declare <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

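; The same unmasked/masked pairs, repeated for i16 element types.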
declare <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

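; The same unmasked/masked pairs, repeated for i32 element types.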
declare <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

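; i64 element tests. On RV32 the 64-bit scalar does not fit in a GPR, so the
; RV32 checks expect the lowering shown below: the vector length is doubled,
; the two 32-bit halves are inserted with a pair of e32 vslide1down.vx steps,
; and the masked forms finish with a vmerge.vvm into the maskedoff operand.
; On RV64 a single vslide1down.vx (with v0.t for the masked form) suffices.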
declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
; RV32-NEXT:    vslide1down.vx v9, v9, a0
; RV32-NEXT:    vslide1down.vx v9, v9, a1
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v9, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m2, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m2, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m2, ta, ma
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vslide1down.vx v10, v10, a1
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v10, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vslide1down.vx v8, v10, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m4, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m4, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m4, ta, ma
; RV32-NEXT:    vslide1down.vx v12, v12, a0
; RV32-NEXT:    vslide1down.vx v12, v12, a1
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v12, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vslide1down.vx v8, v12, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m8, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m8, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT:    vslide1down.vx v16, v16, a0
; RV32-NEXT:    vslide1down.vx v16, v16, a1
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v16, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT:    vslide1down.vx v8, v16, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}