; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s

; Each test checks that an all-ones-mask vp.merge lowers to a tail-undisturbed
; vmv.v.v at the expected SEW/LMUL, merging %y into the passthru %x.

define <vscale x 1 x i8> @vpmerge_mf8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_mf8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i8> %y, <vscale x 1 x i8> %x, i32 %vl)
  ret <vscale x 1 x i8> %1
}

define <vscale x 2 x i8> @vpmerge_mf4(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_mf4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i8> %y, <vscale x 2 x i8> %x, i32 %vl)
  ret <vscale x 2 x i8> %1
}

define <vscale x 4 x i8> @vpmerge_mf2(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_mf2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i8> %y, <vscale x 4 x i8> %x, i32 %vl)
  ret <vscale x 4 x i8> %1
}

define <vscale x 8 x i8> @vpmerge_m1(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i8> %y, <vscale x 8 x i8> %x, i32 %vl)
  ret <vscale x 8 x i8> %1
}

define <vscale x 8 x i16> @vpmerge_m2(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i16> %y, <vscale x 8 x i16> %x, i32 %vl)
  ret <vscale x 8 x i16> %1
}

define <vscale x 8 x i32> @vpmerge_m4(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i32> %y, <vscale x 8 x i32> %x, i32 %vl)
  ret <vscale x 8 x i32> %1
}

define <vscale x 8 x i64> @vpmerge_m8(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i64> %y, <vscale x 8 x i64> %x, i32 %vl)
  ret <vscale x 8 x i64> %1
}

declare <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)
declare <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32)
declare <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32)
declare <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32)
declare <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1>, <vscale x 8 x i32>, <vscale x 8 x i32>, i32)
declare <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1>, <vscale x 8 x i64>, <vscale x 8 x i64>, i32)