; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

declare <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i1> %v
}

define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i16_unmasked(<vscale x 2 x i16> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x i1> %v
}

declare <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i1> %v
}

define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i32_unmasked(<vscale x 2 x i32> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x i1> %v
}

declare <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vi v10, v8, 1, v0.t
; CHECK-NEXT:    vmsne.vi v8, v10, 0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i1> %v
}

define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i64_unmasked(<vscale x 2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x i1> %v
}