; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i1_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i1> %v
}

define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i1_nxv2bf16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i1> %v
}

declare <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfptoui_nxv2i1_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
; ZVFH-NEXT:    vmsne.vi v0, v8, 0, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptoui_nxv2i1_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i1> %v
}

define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfptoui_nxv2i1_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; ZVFH-NEXT:    vmsne.vi v0, v8, 0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptoui_nxv2i1_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9
; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i1> %v
}

declare <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i1_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i1> %v
}

define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i1_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i1> %v
}

declare <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i1_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v10, v8, v0.t
; CHECK-NEXT:    vmsne.vi v8, v10, 0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i1> %v
}

define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i1_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i1> %v
}