; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s

declare <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x i1> @vfptosi_nxv1f16_nxv1i1(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv1f16_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i1> %evec
}

declare <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x i1> @vfptoui_nxv1f16_nxv1i1(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv1f16_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i1> %evec
}

declare <vscale x 1 x i7> @llvm.experimental.constrained.fptosi.nxv1i7.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x i7> @vfptosi_nxv1f16_nxv1i7(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv1f16_nxv1i7:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i7> @llvm.experimental.constrained.fptosi.nxv1i7.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i7> %evec
}

declare <vscale x 1 x i7> @llvm.experimental.constrained.fptoui.nxv1i7.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x i7> @vfptoui_nxv1f16_nxv1i7(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv1f16_nxv1i7:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i7> @llvm.experimental.constrained.fptoui.nxv1i7.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i7> %evec
}

declare <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x i8> @vfptosi_nxv1f16_nxv1i8(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv1f16_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i8> %evec
}

declare <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x i8> @vfptoui_nxv1f16_nxv1i8(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv1f16_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i8> %evec
}

declare <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x i16> @vfptosi_nxv1f16_nxv1i16(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv1f16_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i16> %evec
}

declare <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x i16> @vfptoui_nxv1f16_nxv1i16(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv1f16_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i16> %evec
}

declare <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x i32> @vfptosi_nxv1f16_nxv1i32(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv1f16_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i32> %evec
}

declare <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x i32> @vfptoui_nxv1f16_nxv1i32(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv1f16_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i32> %evec
}

declare <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x i64> @vfptosi_nxv1f16_nxv1i64(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv1f16_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v9, v8
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i64> %evec
}

declare <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f16(<vscale x 1 x half>, metadata)
define <vscale x 1 x i64> @vfptoui_nxv1f16_nxv1i64(<vscale x 1 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv1f16_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v9, v8
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i64> %evec
}

declare <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f16(<vscale x 2 x half>, metadata)
define <vscale x 2 x i1> @vfptosi_nxv2f16_nxv2i1(<vscale x 2 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv2f16_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i1> %evec
}

declare <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half>, metadata)
define <vscale x 2 x i1> @vfptoui_nxv2f16_nxv2i1(<vscale x 2 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv2f16_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i1> %evec
}

declare <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half>, metadata)
define <vscale x 2 x i8> @vfptosi_nxv2f16_nxv2i8(<vscale x 2 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv2f16_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i8> %evec
}

declare <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half>, metadata)
define <vscale x 2 x i8> @vfptoui_nxv2f16_nxv2i8(<vscale x 2 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv2f16_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i8> %evec
}

declare <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half>, metadata)
define <vscale x 2 x i16> @vfptosi_nxv2f16_nxv2i16(<vscale x 2 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv2f16_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i16> %evec
}

declare <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half>, metadata)
define <vscale x 2 x i16> @vfptoui_nxv2f16_nxv2i16(<vscale x 2 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv2f16_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i16> %evec
}

declare <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half>, metadata)
define <vscale x 2 x i32> @vfptosi_nxv2f16_nxv2i32(<vscale x 2 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv2f16_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i32> %evec
}

declare <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half>, metadata)
define <vscale x 2 x i32> @vfptoui_nxv2f16_nxv2i32(<vscale x 2 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv2f16_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i32> %evec
}

declare <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half>, metadata)
define <vscale x 2 x i64> @vfptosi_nxv2f16_nxv2i64(<vscale x 2 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv2f16_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v10, v8
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i64> %evec
}

declare <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half>, metadata)
define <vscale x 2 x i64> @vfptoui_nxv2f16_nxv2i64(<vscale x 2 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv2f16_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v10, v8
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i64> %evec
}

declare <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f16(<vscale x 4 x half>, metadata)
define <vscale x 4 x i1> @vfptosi_nxv4f16_nxv4i1(<vscale x 4 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv4f16_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
!"fpexcept.strict") 285 ret <vscale x 4 x i1> %evec 286} 287 288declare <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f16(<vscale x 4 x half>, metadata) 289define <vscale x 4 x i1> @vfptoui_nxv4f16_nxv4i1(<vscale x 4 x half> %va) strictfp { 290; CHECK-LABEL: vfptoui_nxv4f16_nxv4i1: 291; CHECK: # %bb.0: 292; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma 293; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 294; CHECK-NEXT: vand.vi v8, v9, 1 295; CHECK-NEXT: vmsne.vi v0, v8, 0 296; CHECK-NEXT: ret 297 %evec = call <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict") 298 ret <vscale x 4 x i1> %evec 299} 300 301declare <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f16(<vscale x 4 x half>, metadata) 302define <vscale x 4 x i8> @vfptosi_nxv4f16_nxv4i8(<vscale x 4 x half> %va) strictfp { 303; CHECK-LABEL: vfptosi_nxv4f16_nxv4i8: 304; CHECK: # %bb.0: 305; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma 306; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 307; CHECK-NEXT: vmv1r.v v8, v9 308; CHECK-NEXT: ret 309 %evec = call <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict") 310 ret <vscale x 4 x i8> %evec 311} 312 313declare <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f16(<vscale x 4 x half>, metadata) 314define <vscale x 4 x i8> @vfptoui_nxv4f16_nxv4i8(<vscale x 4 x half> %va) strictfp { 315; CHECK-LABEL: vfptoui_nxv4f16_nxv4i8: 316; CHECK: # %bb.0: 317; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma 318; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 319; CHECK-NEXT: vmv1r.v v8, v9 320; CHECK-NEXT: ret 321 %evec = call <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict") 322 ret <vscale x 4 x i8> %evec 323} 324 325declare <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f16(<vscale x 4 x half>, metadata) 326define <vscale x 4 x i16> @vfptosi_nxv4f16_nxv4i16(<vscale x 4 x half> %va) strictfp { 327; CHECK-LABEL: vfptosi_nxv4f16_nxv4i16: 328; CHECK: # %bb.0: 329; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma 330; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 331; CHECK-NEXT: ret 332 %evec = call <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict") 333 ret <vscale x 4 x i16> %evec 334} 335 336declare <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f16(<vscale x 4 x half>, metadata) 337define <vscale x 4 x i16> @vfptoui_nxv4f16_nxv4i16(<vscale x 4 x half> %va) strictfp { 338; CHECK-LABEL: vfptoui_nxv4f16_nxv4i16: 339; CHECK: # %bb.0: 340; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma 341; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 342; CHECK-NEXT: ret 343 %evec = call <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict") 344 ret <vscale x 4 x i16> %evec 345} 346 347declare <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f16(<vscale x 4 x half>, metadata) 348define <vscale x 4 x i32> @vfptosi_nxv4f16_nxv4i32(<vscale x 4 x half> %va) strictfp { 349; CHECK-LABEL: vfptosi_nxv4f16_nxv4i32: 350; CHECK: # %bb.0: 351; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma 352; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 353; CHECK-NEXT: vmv2r.v v8, v10 354; CHECK-NEXT: ret 355 %evec = call <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f16(<vscale x 4 x half> %va, metadata 
!"fpexcept.strict") 356 ret <vscale x 4 x i32> %evec 357} 358 359declare <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f16(<vscale x 4 x half>, metadata) 360define <vscale x 4 x i32> @vfptoui_nxv4f16_nxv4i32(<vscale x 4 x half> %va) strictfp { 361; CHECK-LABEL: vfptoui_nxv4f16_nxv4i32: 362; CHECK: # %bb.0: 363; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma 364; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 365; CHECK-NEXT: vmv2r.v v8, v10 366; CHECK-NEXT: ret 367 %evec = call <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict") 368 ret <vscale x 4 x i32> %evec 369} 370 371declare <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f16(<vscale x 4 x half>, metadata) 372define <vscale x 4 x i64> @vfptosi_nxv4f16_nxv4i64(<vscale x 4 x half> %va) strictfp { 373; CHECK-LABEL: vfptosi_nxv4f16_nxv4i64: 374; CHECK: # %bb.0: 375; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma 376; CHECK-NEXT: vfwcvt.f.f.v v12, v8 377; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma 378; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12 379; CHECK-NEXT: ret 380 %evec = call <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict") 381 ret <vscale x 4 x i64> %evec 382} 383 384declare <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f16(<vscale x 4 x half>, metadata) 385define <vscale x 4 x i64> @vfptoui_nxv4f16_nxv4i64(<vscale x 4 x half> %va) strictfp { 386; CHECK-LABEL: vfptoui_nxv4f16_nxv4i64: 387; CHECK: # %bb.0: 388; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma 389; CHECK-NEXT: vfwcvt.f.f.v v12, v8 390; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma 391; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12 392; CHECK-NEXT: ret 393 %evec = call <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict") 394 ret <vscale x 4 x i64> %evec 395} 396 397declare <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f16(<vscale x 8 x half>, metadata) 398define <vscale x 8 x i1> @vfptosi_nxv8f16_nxv8i1(<vscale x 8 x half> %va) strictfp { 399; CHECK-LABEL: vfptosi_nxv8f16_nxv8i1: 400; CHECK: # %bb.0: 401; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma 402; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 403; CHECK-NEXT: vand.vi v8, v10, 1 404; CHECK-NEXT: vmsne.vi v0, v8, 0 405; CHECK-NEXT: ret 406 %evec = call <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict") 407 ret <vscale x 8 x i1> %evec 408} 409 410declare <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f16(<vscale x 8 x half>, metadata) 411define <vscale x 8 x i1> @vfptoui_nxv8f16_nxv8i1(<vscale x 8 x half> %va) strictfp { 412; CHECK-LABEL: vfptoui_nxv8f16_nxv8i1: 413; CHECK: # %bb.0: 414; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma 415; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 416; CHECK-NEXT: vand.vi v8, v10, 1 417; CHECK-NEXT: vmsne.vi v0, v8, 0 418; CHECK-NEXT: ret 419 %evec = call <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict") 420 ret <vscale x 8 x i1> %evec 421} 422 423declare <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f16(<vscale x 8 x half>, metadata) 424define <vscale x 8 x i8> @vfptosi_nxv8f16_nxv8i8(<vscale x 8 x half> %va) strictfp { 425; CHECK-LABEL: vfptosi_nxv8f16_nxv8i8: 426; CHECK: # %bb.0: 427; CHECK-NEXT: vsetvli a0, zero, e8, 
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i8> %evec
}

declare <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f16(<vscale x 8 x half>, metadata)
define <vscale x 8 x i8> @vfptoui_nxv8f16_nxv8i8(<vscale x 8 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv8f16_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i8> %evec
}

declare <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f16(<vscale x 8 x half>, metadata)
define <vscale x 8 x i16> @vfptosi_nxv8f16_nxv8i16(<vscale x 8 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv8f16_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i16> %evec
}

declare <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f16(<vscale x 8 x half>, metadata)
define <vscale x 8 x i16> @vfptoui_nxv8f16_nxv8i16(<vscale x 8 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv8f16_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i16> %evec
}

declare <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f16(<vscale x 8 x half>, metadata)
define <vscale x 8 x i32> @vfptosi_nxv8f16_nxv8i32(<vscale x 8 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv8f16_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i32> %evec
}

declare <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f16(<vscale x 8 x half>, metadata)
define <vscale x 8 x i32> @vfptoui_nxv8f16_nxv8i32(<vscale x 8 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv8f16_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i32> %evec
}

declare <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f16(<vscale x 8 x half>, metadata)
define <vscale x 8 x i64> @vfptosi_nxv8f16_nxv8i64(<vscale x 8 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv8f16_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v16, v8
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v16
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i64> %evec
}

declare <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f16(<vscale x 8 x half>, metadata)
define <vscale x 8 x i64> @vfptoui_nxv8f16_nxv8i64(<vscale x 8 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv8f16_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v16, v8
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i64> %evec
}

declare <vscale x 16 x i1> @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f16(<vscale x 16 x half>, metadata)
define <vscale x 16 x i1> @vfptosi_nxv16f16_nxv16i1(<vscale x 16 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv16f16_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vand.vi v8, v12, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 16 x i1> @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 16 x i1> %evec
}

declare <vscale x 16 x i1> @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f16(<vscale x 16 x half>, metadata)
define <vscale x 16 x i1> @vfptoui_nxv16f16_nxv16i1(<vscale x 16 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv16f16_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vand.vi v8, v12, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 16 x i1> @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 16 x i1> %evec
}

declare <vscale x 16 x i8> @llvm.experimental.constrained.fptosi.nxv16i8.nxv16f16(<vscale x 16 x half>, metadata)
define <vscale x 16 x i8> @vfptosi_nxv16f16_nxv16i8(<vscale x 16 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv16f16_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
  %evec = call <vscale x 16 x i8> @llvm.experimental.constrained.fptosi.nxv16i8.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 16 x i8> %evec
}

declare <vscale x 16 x i8> @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f16(<vscale x 16 x half>, metadata)
define <vscale x 16 x i8> @vfptoui_nxv16f16_nxv16i8(<vscale x 16 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv16f16_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
  %evec = call <vscale x 16 x i8> @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 16 x i8> %evec
}
declare <vscale x 16 x i16> @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f16(<vscale x 16 x half>, metadata)
define <vscale x 16 x i16> @vfptosi_nxv16f16_nxv16i16(<vscale x 16 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv16f16_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 16 x i16> @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 16 x i16> %evec
}

declare <vscale x 16 x i16> @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f16(<vscale x 16 x half>, metadata)
define <vscale x 16 x i16> @vfptoui_nxv16f16_nxv16i16(<vscale x 16 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv16f16_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 16 x i16> @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 16 x i16> %evec
}

declare <vscale x 16 x i32> @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f16(<vscale x 16 x half>, metadata)
define <vscale x 16 x i32> @vfptosi_nxv16f16_nxv16i32(<vscale x 16 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv16f16_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
  %evec = call <vscale x 16 x i32> @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 16 x i32> %evec
}

declare <vscale x 16 x i32> @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f16(<vscale x 16 x half>, metadata)
define <vscale x 16 x i32> @vfptoui_nxv16f16_nxv16i32(<vscale x 16 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv16f16_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
  %evec = call <vscale x 16 x i32> @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 16 x i32> %evec
}

declare <vscale x 32 x i1> @llvm.experimental.constrained.fptosi.nxv32i1.nxv32f16(<vscale x 32 x half>, metadata)
define <vscale x 32 x i1> @vfptosi_nxv32f16_nxv32i1(<vscale x 32 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv32f16_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vand.vi v8, v16, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 32 x i1> @llvm.experimental.constrained.fptosi.nxv32i1.nxv32f16(<vscale x 32 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 32 x i1> %evec
}

declare <vscale x 32 x i1> @llvm.experimental.constrained.fptoui.nxv32i1.nxv32f16(<vscale x 32 x half>, metadata)
define <vscale x 32 x i1> @vfptoui_nxv32f16_nxv32i1(<vscale x 32 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv32f16_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vand.vi v8, v16, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 32 x i1> @llvm.experimental.constrained.fptoui.nxv32i1.nxv32f16(<vscale x 32 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 32 x i1> %evec
}

declare <vscale x 32 x i8> @llvm.experimental.constrained.fptosi.nxv32i8.nxv32f16(<vscale x 32 x half>, metadata)
define <vscale x 32 x i8> @vfptosi_nxv32f16_nxv32i8(<vscale x 32 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv32f16_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
  %evec = call <vscale x 32 x i8> @llvm.experimental.constrained.fptosi.nxv32i8.nxv32f16(<vscale x 32 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 32 x i8> %evec
}

declare <vscale x 32 x i8> @llvm.experimental.constrained.fptoui.nxv32i8.nxv32f16(<vscale x 32 x half>, metadata)
define <vscale x 32 x i8> @vfptoui_nxv32f16_nxv32i8(<vscale x 32 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv32f16_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
  %evec = call <vscale x 32 x i8> @llvm.experimental.constrained.fptoui.nxv32i8.nxv32f16(<vscale x 32 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 32 x i8> %evec
}

declare <vscale x 32 x i16> @llvm.experimental.constrained.fptosi.nxv32i16.nxv32f16(<vscale x 32 x half>, metadata)
define <vscale x 32 x i16> @vfptosi_nxv32f16_nxv32i16(<vscale x 32 x half> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv32f16_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 32 x i16> @llvm.experimental.constrained.fptosi.nxv32i16.nxv32f16(<vscale x 32 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 32 x i16> %evec
}

declare <vscale x 32 x i16> @llvm.experimental.constrained.fptoui.nxv32i16.nxv32f16(<vscale x 32 x half>, metadata)
define <vscale x 32 x i16> @vfptoui_nxv32f16_nxv32i16(<vscale x 32 x half> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv32f16_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 32 x i16> @llvm.experimental.constrained.fptoui.nxv32i16.nxv32f16(<vscale x 32 x half> %va, metadata !"fpexcept.strict")
  ret <vscale x 32 x i16> %evec
}

declare <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f32(<vscale x 1 x float>, metadata)
define <vscale x 1 x i1> @vfptosi_nxv1f32_nxv1i1(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv1f32_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i1> %evec
}

declare <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f32(<vscale x 1 x float>, metadata)
define <vscale x 1 x i1> @vfptoui_nxv1f32_nxv1i1(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv1f32_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i1> %evec
}

declare <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f32(<vscale x 1 x float>, metadata)
define <vscale x 1 x i8> @vfptosi_nxv1f32_nxv1i8(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv1f32_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i8> %evec
}

declare <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f32(<vscale x 1 x float>, metadata)
define <vscale x 1 x i8> @vfptoui_nxv1f32_nxv1i8(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv1f32_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i8> %evec
}

declare <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f32(<vscale x 1 x float>, metadata)
define <vscale x 1 x i16> @vfptosi_nxv1f32_nxv1i16(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv1f32_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i16> %evec
}

declare <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f32(<vscale x 1 x float>, metadata)
define <vscale x 1 x i16> @vfptoui_nxv1f32_nxv1i16(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv1f32_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i16> %evec
}

declare <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f32(<vscale x 1 x float>, metadata)
define <vscale x 1 x i32> @vfptosi_nxv1f32_nxv1i32(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv1f32_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i32> %evec
}

declare <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f32(<vscale x 1 x float>, metadata)
define <vscale x 1 x i32> @vfptoui_nxv1f32_nxv1i32(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv1f32_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i32> %evec
}

declare <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f32(<vscale x 1 x float>, metadata)
define <vscale x 1 x i64> @vfptosi_nxv1f32_nxv1i64(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv1f32_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i64> %evec
}

declare <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f32(<vscale x 1 x float>, metadata)
define <vscale x 1 x i64> @vfptoui_nxv1f32_nxv1i64(<vscale x 1 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv1f32_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i64> %evec
}

declare <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f32(<vscale x 2 x float>, metadata)
define <vscale x 2 x i1> @vfptosi_nxv2f32_nxv2i1(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv2f32_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i1> %evec
}

declare <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f32(<vscale x 2 x float>, metadata)
define <vscale x 2 x i1> @vfptoui_nxv2f32_nxv2i1(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv2f32_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i1> %evec
}

declare <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float>, metadata)
define <vscale x 2 x i8> @vfptosi_nxv2f32_nxv2i8(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv2f32_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i8> %evec
}
declare <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f32(<vscale x 2 x float>, metadata)
define <vscale x 2 x i8> @vfptoui_nxv2f32_nxv2i8(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv2f32_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i8> %evec
}

declare <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float>, metadata)
define <vscale x 2 x i16> @vfptosi_nxv2f32_nxv2i16(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv2f32_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i16> %evec
}

declare <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f32(<vscale x 2 x float>, metadata)
define <vscale x 2 x i16> @vfptoui_nxv2f32_nxv2i16(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv2f32_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i16> %evec
}

declare <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float>, metadata)
define <vscale x 2 x i32> @vfptosi_nxv2f32_nxv2i32(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv2f32_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i32> %evec
}

declare <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f32(<vscale x 2 x float>, metadata)
define <vscale x 2 x i32> @vfptoui_nxv2f32_nxv2i32(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv2f32_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i32> %evec
}

declare <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float>, metadata)
define <vscale x 2 x i64> @vfptosi_nxv2f32_nxv2i64(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv2f32_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i64> %evec
}

declare <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f32(<vscale x 2 x float>, metadata)
define <vscale x 2 x i64> @vfptoui_nxv2f32_nxv2i64(<vscale x 2 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv2f32_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
  %evec = call <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 2 x i64> %evec
}

declare <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f32(<vscale x 4 x float>, metadata)
define <vscale x 4 x i1> @vfptosi_nxv4f32_nxv4i1(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv4f32_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vand.vi v8, v10, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x i1> %evec
}

declare <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f32(<vscale x 4 x float>, metadata)
define <vscale x 4 x i1> @vfptoui_nxv4f32_nxv4i1(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv4f32_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vand.vi v8, v10, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x i1> %evec
}

declare <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f32(<vscale x 4 x float>, metadata)
define <vscale x 4 x i8> @vfptosi_nxv4f32_nxv4i8(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv4f32_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x i8> %evec
}

declare <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f32(<vscale x 4 x float>, metadata)
define <vscale x 4 x i8> @vfptoui_nxv4f32_nxv4i8(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv4f32_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x i8> %evec
}

declare <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f32(<vscale x 4 x float>, metadata)
define <vscale x 4 x i16> @vfptosi_nxv4f32_nxv4i16(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv4f32_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
  %evec = call <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x i16> %evec
}

declare <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f32(<vscale x 4 x float>, metadata)
define <vscale x 4 x i16> @vfptoui_nxv4f32_nxv4i16(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv4f32_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
  %evec = call <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x i16> %evec
}

declare <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f32(<vscale x 4 x float>, metadata)
define <vscale x 4 x i32> @vfptosi_nxv4f32_nxv4i32(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv4f32_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x i32> %evec
}

declare <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f32(<vscale x 4 x float>, metadata)
define <vscale x 4 x i32> @vfptoui_nxv4f32_nxv4i32(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv4f32_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x i32> %evec
}

declare <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f32(<vscale x 4 x float>, metadata)
define <vscale x 4 x i64> @vfptosi_nxv4f32_nxv4i64(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv4f32_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
  %evec = call <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x i64> %evec
}

declare <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f32(<vscale x 4 x float>, metadata)
define <vscale x 4 x i64> @vfptoui_nxv4f32_nxv4i64(<vscale x 4 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv4f32_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
  %evec = call <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 4 x i64> %evec
}

declare <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f32(<vscale x 8 x float>, metadata)
define <vscale x 8 x i1> @vfptosi_nxv8f32_nxv8i1(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv8f32_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vand.vi v8, v12, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i1> %evec
}

declare <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f32(<vscale x 8 x float>, metadata)
define <vscale x 8 x i1> @vfptoui_nxv8f32_nxv8i1(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv8f32_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vand.vi v8, v12, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i1> %evec
}

declare <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f32(<vscale x 8 x float>, metadata)
define <vscale x 8 x i8> @vfptosi_nxv8f32_nxv8i8(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv8f32_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i8> %evec
}

declare <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f32(<vscale x 8 x float>, metadata)
define <vscale x 8 x i8> @vfptoui_nxv8f32_nxv8i8(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv8f32_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i8> %evec
}

declare <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f32(<vscale x 8 x float>, metadata)
define <vscale x 8 x i16> @vfptosi_nxv8f32_nxv8i16(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv8f32_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i16> %evec
}

declare <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f32(<vscale x 8 x float>, metadata)
define <vscale x 8 x i16> @vfptoui_nxv8f32_nxv8i16(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv8f32_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i16> %evec
}

declare <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f32(<vscale x 8 x float>, metadata)
define <vscale x 8 x i32> @vfptosi_nxv8f32_nxv8i32(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv8f32_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i32> %evec
}

declare <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f32(<vscale x 8 x float>, metadata)
define <vscale x 8 x i32> @vfptoui_nxv8f32_nxv8i32(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv8f32_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i32> %evec
}

declare <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f32(<vscale x 8 x float>, metadata)
define <vscale x 8 x i64> @vfptosi_nxv8f32_nxv8i64(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv8f32_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i64> %evec
}

declare <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f32(<vscale x 8 x float>, metadata)
define <vscale x 8 x i64> @vfptoui_nxv8f32_nxv8i64(<vscale x 8 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv8f32_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
  %evec = call <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 8 x i64> %evec
}

declare <vscale x 16 x i1> @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f32(<vscale x 16 x float>, metadata)
define <vscale x 16 x i1> @vfptosi_nxv16f32_nxv16i1(<vscale x 16 x float> %va) strictfp {
; CHECK-LABEL: vfptosi_nxv16f32_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vand.vi v8, v16, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
  %evec = call <vscale x 16 x i1> @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 16 x i1> %evec
}

declare <vscale x 16 x i1> @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f32(<vscale x 16 x float>, metadata)
define <vscale x 16 x i1> @vfptoui_nxv16f32_nxv16i1(<vscale x 16 x float> %va) strictfp {
; CHECK-LABEL: vfptoui_nxv16f32_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vand.vi v8, v16, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
CHECK-NEXT: ret 1197 %evec = call <vscale x 16 x i1> @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict") 1198 ret <vscale x 16 x i1> %evec 1199} 1200 1201declare <vscale x 16 x i8> @llvm.experimental.constrained.fptosi.nxv16i8.nxv16f32(<vscale x 16 x float>, metadata) 1202define <vscale x 16 x i8> @vfptosi_nxv16f32_nxv16i8(<vscale x 16 x float> %va) strictfp { 1203; CHECK-LABEL: vfptosi_nxv16f32_nxv16i8: 1204; CHECK: # %bb.0: 1205; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma 1206; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 1207; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma 1208; CHECK-NEXT: vnsrl.wi v8, v16, 0 1209; CHECK-NEXT: ret 1210 %evec = call <vscale x 16 x i8> @llvm.experimental.constrained.fptosi.nxv16i8.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict") 1211 ret <vscale x 16 x i8> %evec 1212} 1213 1214declare <vscale x 16 x i8> @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f32(<vscale x 16 x float>, metadata) 1215define <vscale x 16 x i8> @vfptoui_nxv16f32_nxv16i8(<vscale x 16 x float> %va) strictfp { 1216; CHECK-LABEL: vfptoui_nxv16f32_nxv16i8: 1217; CHECK: # %bb.0: 1218; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma 1219; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 1220; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma 1221; CHECK-NEXT: vnsrl.wi v8, v16, 0 1222; CHECK-NEXT: ret 1223 %evec = call <vscale x 16 x i8> @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict") 1224 ret <vscale x 16 x i8> %evec 1225} 1226 1227declare <vscale x 16 x i16> @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f32(<vscale x 16 x float>, metadata) 1228define <vscale x 16 x i16> @vfptosi_nxv16f32_nxv16i16(<vscale x 16 x float> %va) strictfp { 1229; CHECK-LABEL: vfptosi_nxv16f32_nxv16i16: 1230; CHECK: # %bb.0: 1231; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma 1232; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 1233; CHECK-NEXT: vmv.v.v v8, v16 1234; CHECK-NEXT: ret 1235 %evec = call <vscale x 16 x i16> @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict") 1236 ret <vscale x 16 x i16> %evec 1237} 1238 1239declare <vscale x 16 x i16> @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f32(<vscale x 16 x float>, metadata) 1240define <vscale x 16 x i16> @vfptoui_nxv16f32_nxv16i16(<vscale x 16 x float> %va) strictfp { 1241; CHECK-LABEL: vfptoui_nxv16f32_nxv16i16: 1242; CHECK: # %bb.0: 1243; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma 1244; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 1245; CHECK-NEXT: vmv.v.v v8, v16 1246; CHECK-NEXT: ret 1247 %evec = call <vscale x 16 x i16> @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict") 1248 ret <vscale x 16 x i16> %evec 1249} 1250 1251declare <vscale x 16 x i32> @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f32(<vscale x 16 x float>, metadata) 1252define <vscale x 16 x i32> @vfptosi_nxv16f32_nxv16i32(<vscale x 16 x float> %va) strictfp { 1253; CHECK-LABEL: vfptosi_nxv16f32_nxv16i32: 1254; CHECK: # %bb.0: 1255; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma 1256; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 1257; CHECK-NEXT: ret 1258 %evec = call <vscale x 16 x i32> @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict") 1259 ret <vscale x 16 x i32> %evec 1260} 1261 1262declare <vscale x 16 x i32> @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f32(<vscale x 16 x float>, metadata) 
1263define <vscale x 16 x i32> @vfptoui_nxv16f32_nxv16i32(<vscale x 16 x float> %va) strictfp { 1264; CHECK-LABEL: vfptoui_nxv16f32_nxv16i32: 1265; CHECK: # %bb.0: 1266; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma 1267; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 1268; CHECK-NEXT: ret 1269 %evec = call <vscale x 16 x i32> @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict") 1270 ret <vscale x 16 x i32> %evec 1271} 1272 1273declare <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f64(<vscale x 1 x double>, metadata) 1274define <vscale x 1 x i1> @vfptosi_nxv1f64_nxv1i1(<vscale x 1 x double> %va) strictfp { 1275; CHECK-LABEL: vfptosi_nxv1f64_nxv1i1: 1276; CHECK: # %bb.0: 1277; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma 1278; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 1279; CHECK-NEXT: vand.vi v8, v9, 1 1280; CHECK-NEXT: vmsne.vi v0, v8, 0 1281; CHECK-NEXT: ret 1282 %evec = call <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict") 1283 ret <vscale x 1 x i1> %evec 1284} 1285 1286declare <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f64(<vscale x 1 x double>, metadata) 1287define <vscale x 1 x i1> @vfptoui_nxv1f64_nxv1i1(<vscale x 1 x double> %va) strictfp { 1288; CHECK-LABEL: vfptoui_nxv1f64_nxv1i1: 1289; CHECK: # %bb.0: 1290; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma 1291; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 1292; CHECK-NEXT: vand.vi v8, v9, 1 1293; CHECK-NEXT: vmsne.vi v0, v8, 0 1294; CHECK-NEXT: ret 1295 %evec = call <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict") 1296 ret <vscale x 1 x i1> %evec 1297} 1298 1299declare <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f64(<vscale x 1 x double>, metadata) 1300define <vscale x 1 x i8> @vfptosi_nxv1f64_nxv1i8(<vscale x 1 x double> %va) strictfp { 1301; CHECK-LABEL: vfptosi_nxv1f64_nxv1i8: 1302; CHECK: # %bb.0: 1303; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma 1304; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 1305; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma 1306; CHECK-NEXT: vnsrl.wi v8, v9, 0 1307; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma 1308; CHECK-NEXT: vnsrl.wi v8, v8, 0 1309; CHECK-NEXT: ret 1310 %evec = call <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict") 1311 ret <vscale x 1 x i8> %evec 1312} 1313 1314declare <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f64(<vscale x 1 x double>, metadata) 1315define <vscale x 1 x i8> @vfptoui_nxv1f64_nxv1i8(<vscale x 1 x double> %va) strictfp { 1316; CHECK-LABEL: vfptoui_nxv1f64_nxv1i8: 1317; CHECK: # %bb.0: 1318; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma 1319; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 1320; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma 1321; CHECK-NEXT: vnsrl.wi v8, v9, 0 1322; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma 1323; CHECK-NEXT: vnsrl.wi v8, v8, 0 1324; CHECK-NEXT: ret 1325 %evec = call <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict") 1326 ret <vscale x 1 x i8> %evec 1327} 1328 1329declare <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f64(<vscale x 1 x double>, metadata) 1330define <vscale x 1 x i16> @vfptosi_nxv1f64_nxv1i16(<vscale x 1 x double> %va) strictfp { 1331; CHECK-LABEL: 
vfptosi_nxv1f64_nxv1i16: 1332; CHECK: # %bb.0: 1333; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma 1334; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 1335; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma 1336; CHECK-NEXT: vnsrl.wi v8, v9, 0 1337; CHECK-NEXT: ret 1338 %evec = call <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict") 1339 ret <vscale x 1 x i16> %evec 1340} 1341 1342declare <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f64(<vscale x 1 x double>, metadata) 1343define <vscale x 1 x i16> @vfptoui_nxv1f64_nxv1i16(<vscale x 1 x double> %va) strictfp { 1344; CHECK-LABEL: vfptoui_nxv1f64_nxv1i16: 1345; CHECK: # %bb.0: 1346; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma 1347; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 1348; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma 1349; CHECK-NEXT: vnsrl.wi v8, v9, 0 1350; CHECK-NEXT: ret 1351 %evec = call <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict") 1352 ret <vscale x 1 x i16> %evec 1353} 1354 1355declare <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f64(<vscale x 1 x double>, metadata) 1356define <vscale x 1 x i32> @vfptosi_nxv1f64_nxv1i32(<vscale x 1 x double> %va) strictfp { 1357; CHECK-LABEL: vfptosi_nxv1f64_nxv1i32: 1358; CHECK: # %bb.0: 1359; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma 1360; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 1361; CHECK-NEXT: vmv1r.v v8, v9 1362; CHECK-NEXT: ret 1363 %evec = call <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict") 1364 ret <vscale x 1 x i32> %evec 1365} 1366 1367declare <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f64(<vscale x 1 x double>, metadata) 1368define <vscale x 1 x i32> @vfptoui_nxv1f64_nxv1i32(<vscale x 1 x double> %va) strictfp { 1369; CHECK-LABEL: vfptoui_nxv1f64_nxv1i32: 1370; CHECK: # %bb.0: 1371; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma 1372; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 1373; CHECK-NEXT: vmv1r.v v8, v9 1374; CHECK-NEXT: ret 1375 %evec = call <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict") 1376 ret <vscale x 1 x i32> %evec 1377} 1378 1379declare <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f64(<vscale x 1 x double>, metadata) 1380define <vscale x 1 x i64> @vfptosi_nxv1f64_nxv1i64(<vscale x 1 x double> %va) strictfp { 1381; CHECK-LABEL: vfptosi_nxv1f64_nxv1i64: 1382; CHECK: # %bb.0: 1383; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma 1384; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 1385; CHECK-NEXT: ret 1386 %evec = call <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict") 1387 ret <vscale x 1 x i64> %evec 1388} 1389 1390declare <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f64(<vscale x 1 x double>, metadata) 1391define <vscale x 1 x i64> @vfptoui_nxv1f64_nxv1i64(<vscale x 1 x double> %va) strictfp { 1392; CHECK-LABEL: vfptoui_nxv1f64_nxv1i64: 1393; CHECK: # %bb.0: 1394; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma 1395; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 1396; CHECK-NEXT: ret 1397 %evec = call <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict") 1398 ret <vscale x 1 x i64> %evec 1399} 1400 1401declare <vscale 
x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f64(<vscale x 2 x double>, metadata) 1402define <vscale x 2 x i1> @vfptosi_nxv2f64_nxv2i1(<vscale x 2 x double> %va) strictfp { 1403; CHECK-LABEL: vfptosi_nxv2f64_nxv2i1: 1404; CHECK: # %bb.0: 1405; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma 1406; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 1407; CHECK-NEXT: vand.vi v8, v10, 1 1408; CHECK-NEXT: vmsne.vi v0, v8, 0 1409; CHECK-NEXT: ret 1410 %evec = call <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict") 1411 ret <vscale x 2 x i1> %evec 1412} 1413 1414declare <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f64(<vscale x 2 x double>, metadata) 1415define <vscale x 2 x i1> @vfptoui_nxv2f64_nxv2i1(<vscale x 2 x double> %va) strictfp { 1416; CHECK-LABEL: vfptoui_nxv2f64_nxv2i1: 1417; CHECK: # %bb.0: 1418; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma 1419; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 1420; CHECK-NEXT: vand.vi v8, v10, 1 1421; CHECK-NEXT: vmsne.vi v0, v8, 0 1422; CHECK-NEXT: ret 1423 %evec = call <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict") 1424 ret <vscale x 2 x i1> %evec 1425} 1426 1427declare <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double>, metadata) 1428define <vscale x 2 x i8> @vfptosi_nxv2f64_nxv2i8(<vscale x 2 x double> %va) strictfp { 1429; CHECK-LABEL: vfptosi_nxv2f64_nxv2i8: 1430; CHECK: # %bb.0: 1431; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma 1432; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 1433; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma 1434; CHECK-NEXT: vnsrl.wi v8, v10, 0 1435; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma 1436; CHECK-NEXT: vnsrl.wi v8, v8, 0 1437; CHECK-NEXT: ret 1438 %evec = call <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict") 1439 ret <vscale x 2 x i8> %evec 1440} 1441 1442declare <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f64(<vscale x 2 x double>, metadata) 1443define <vscale x 2 x i8> @vfptoui_nxv2f64_nxv2i8(<vscale x 2 x double> %va) strictfp { 1444; CHECK-LABEL: vfptoui_nxv2f64_nxv2i8: 1445; CHECK: # %bb.0: 1446; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma 1447; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 1448; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma 1449; CHECK-NEXT: vnsrl.wi v8, v10, 0 1450; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma 1451; CHECK-NEXT: vnsrl.wi v8, v8, 0 1452; CHECK-NEXT: ret 1453 %evec = call <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict") 1454 ret <vscale x 2 x i8> %evec 1455} 1456 1457declare <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double>, metadata) 1458define <vscale x 2 x i16> @vfptosi_nxv2f64_nxv2i16(<vscale x 2 x double> %va) strictfp { 1459; CHECK-LABEL: vfptosi_nxv2f64_nxv2i16: 1460; CHECK: # %bb.0: 1461; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma 1462; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 1463; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma 1464; CHECK-NEXT: vnsrl.wi v8, v10, 0 1465; CHECK-NEXT: ret 1466 %evec = call <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict") 1467 ret <vscale x 2 x i16> %evec 1468} 1469 1470declare <vscale x 2 x i16> 
@llvm.experimental.constrained.fptoui.nxv2i16.nxv2f64(<vscale x 2 x double>, metadata) 1471define <vscale x 2 x i16> @vfptoui_nxv2f64_nxv2i16(<vscale x 2 x double> %va) strictfp { 1472; CHECK-LABEL: vfptoui_nxv2f64_nxv2i16: 1473; CHECK: # %bb.0: 1474; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma 1475; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 1476; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma 1477; CHECK-NEXT: vnsrl.wi v8, v10, 0 1478; CHECK-NEXT: ret 1479 %evec = call <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict") 1480 ret <vscale x 2 x i16> %evec 1481} 1482 1483declare <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double>, metadata) 1484define <vscale x 2 x i32> @vfptosi_nxv2f64_nxv2i32(<vscale x 2 x double> %va) strictfp { 1485; CHECK-LABEL: vfptosi_nxv2f64_nxv2i32: 1486; CHECK: # %bb.0: 1487; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma 1488; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 1489; CHECK-NEXT: vmv.v.v v8, v10 1490; CHECK-NEXT: ret 1491 %evec = call <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict") 1492 ret <vscale x 2 x i32> %evec 1493} 1494 1495declare <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f64(<vscale x 2 x double>, metadata) 1496define <vscale x 2 x i32> @vfptoui_nxv2f64_nxv2i32(<vscale x 2 x double> %va) strictfp { 1497; CHECK-LABEL: vfptoui_nxv2f64_nxv2i32: 1498; CHECK: # %bb.0: 1499; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma 1500; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 1501; CHECK-NEXT: vmv.v.v v8, v10 1502; CHECK-NEXT: ret 1503 %evec = call <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict") 1504 ret <vscale x 2 x i32> %evec 1505} 1506 1507declare <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double>, metadata) 1508define <vscale x 2 x i64> @vfptosi_nxv2f64_nxv2i64(<vscale x 2 x double> %va) strictfp { 1509; CHECK-LABEL: vfptosi_nxv2f64_nxv2i64: 1510; CHECK: # %bb.0: 1511; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma 1512; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 1513; CHECK-NEXT: ret 1514 %evec = call <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict") 1515 ret <vscale x 2 x i64> %evec 1516} 1517 1518declare <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f64(<vscale x 2 x double>, metadata) 1519define <vscale x 2 x i64> @vfptoui_nxv2f64_nxv2i64(<vscale x 2 x double> %va) strictfp { 1520; CHECK-LABEL: vfptoui_nxv2f64_nxv2i64: 1521; CHECK: # %bb.0: 1522; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma 1523; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 1524; CHECK-NEXT: ret 1525 %evec = call <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict") 1526 ret <vscale x 2 x i64> %evec 1527} 1528 1529declare <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f64(<vscale x 4 x double>, metadata) 1530define <vscale x 4 x i1> @vfptosi_nxv4f64_nxv4i1(<vscale x 4 x double> %va) strictfp { 1531; CHECK-LABEL: vfptosi_nxv4f64_nxv4i1: 1532; CHECK: # %bb.0: 1533; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 1534; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 1535; CHECK-NEXT: vand.vi v8, v12, 1 1536; CHECK-NEXT: vmsne.vi v0, v8, 0 1537; CHECK-NEXT: ret 1538 %evec = call <vscale x 4 x 
i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict") 1539 ret <vscale x 4 x i1> %evec 1540} 1541 1542declare <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f64(<vscale x 4 x double>, metadata) 1543define <vscale x 4 x i1> @vfptoui_nxv4f64_nxv4i1(<vscale x 4 x double> %va) strictfp { 1544; CHECK-LABEL: vfptoui_nxv4f64_nxv4i1: 1545; CHECK: # %bb.0: 1546; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 1547; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 1548; CHECK-NEXT: vand.vi v8, v12, 1 1549; CHECK-NEXT: vmsne.vi v0, v8, 0 1550; CHECK-NEXT: ret 1551 %evec = call <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict") 1552 ret <vscale x 4 x i1> %evec 1553} 1554 1555declare <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f64(<vscale x 4 x double>, metadata) 1556define <vscale x 4 x i8> @vfptosi_nxv4f64_nxv4i8(<vscale x 4 x double> %va) strictfp { 1557; CHECK-LABEL: vfptosi_nxv4f64_nxv4i8: 1558; CHECK: # %bb.0: 1559; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 1560; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 1561; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma 1562; CHECK-NEXT: vnsrl.wi v8, v12, 0 1563; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma 1564; CHECK-NEXT: vnsrl.wi v8, v8, 0 1565; CHECK-NEXT: ret 1566 %evec = call <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict") 1567 ret <vscale x 4 x i8> %evec 1568} 1569 1570declare <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f64(<vscale x 4 x double>, metadata) 1571define <vscale x 4 x i8> @vfptoui_nxv4f64_nxv4i8(<vscale x 4 x double> %va) strictfp { 1572; CHECK-LABEL: vfptoui_nxv4f64_nxv4i8: 1573; CHECK: # %bb.0: 1574; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 1575; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 1576; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma 1577; CHECK-NEXT: vnsrl.wi v8, v12, 0 1578; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma 1579; CHECK-NEXT: vnsrl.wi v8, v8, 0 1580; CHECK-NEXT: ret 1581 %evec = call <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict") 1582 ret <vscale x 4 x i8> %evec 1583} 1584 1585declare <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f64(<vscale x 4 x double>, metadata) 1586define <vscale x 4 x i16> @vfptosi_nxv4f64_nxv4i16(<vscale x 4 x double> %va) strictfp { 1587; CHECK-LABEL: vfptosi_nxv4f64_nxv4i16: 1588; CHECK: # %bb.0: 1589; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 1590; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 1591; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma 1592; CHECK-NEXT: vnsrl.wi v8, v12, 0 1593; CHECK-NEXT: ret 1594 %evec = call <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict") 1595 ret <vscale x 4 x i16> %evec 1596} 1597 1598declare <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f64(<vscale x 4 x double>, metadata) 1599define <vscale x 4 x i16> @vfptoui_nxv4f64_nxv4i16(<vscale x 4 x double> %va) strictfp { 1600; CHECK-LABEL: vfptoui_nxv4f64_nxv4i16: 1601; CHECK: # %bb.0: 1602; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 1603; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 1604; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma 1605; CHECK-NEXT: vnsrl.wi v8, v12, 0 1606; CHECK-NEXT: ret 1607 %evec = call <vscale x 4 x i16> 
@llvm.experimental.constrained.fptoui.nxv4i16.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict") 1608 ret <vscale x 4 x i16> %evec 1609} 1610 1611declare <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f64(<vscale x 4 x double>, metadata) 1612define <vscale x 4 x i32> @vfptosi_nxv4f64_nxv4i32(<vscale x 4 x double> %va) strictfp { 1613; CHECK-LABEL: vfptosi_nxv4f64_nxv4i32: 1614; CHECK: # %bb.0: 1615; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 1616; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 1617; CHECK-NEXT: vmv.v.v v8, v12 1618; CHECK-NEXT: ret 1619 %evec = call <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict") 1620 ret <vscale x 4 x i32> %evec 1621} 1622 1623declare <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f64(<vscale x 4 x double>, metadata) 1624define <vscale x 4 x i32> @vfptoui_nxv4f64_nxv4i32(<vscale x 4 x double> %va) strictfp { 1625; CHECK-LABEL: vfptoui_nxv4f64_nxv4i32: 1626; CHECK: # %bb.0: 1627; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 1628; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 1629; CHECK-NEXT: vmv.v.v v8, v12 1630; CHECK-NEXT: ret 1631 %evec = call <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict") 1632 ret <vscale x 4 x i32> %evec 1633} 1634 1635declare <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f64(<vscale x 4 x double>, metadata) 1636define <vscale x 4 x i64> @vfptosi_nxv4f64_nxv4i64(<vscale x 4 x double> %va) strictfp { 1637; CHECK-LABEL: vfptosi_nxv4f64_nxv4i64: 1638; CHECK: # %bb.0: 1639; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma 1640; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 1641; CHECK-NEXT: ret 1642 %evec = call <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict") 1643 ret <vscale x 4 x i64> %evec 1644} 1645 1646declare <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f64(<vscale x 4 x double>, metadata) 1647define <vscale x 4 x i64> @vfptoui_nxv4f64_nxv4i64(<vscale x 4 x double> %va) strictfp { 1648; CHECK-LABEL: vfptoui_nxv4f64_nxv4i64: 1649; CHECK: # %bb.0: 1650; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma 1651; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 1652; CHECK-NEXT: ret 1653 %evec = call <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict") 1654 ret <vscale x 4 x i64> %evec 1655} 1656 1657declare <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f64(<vscale x 8 x double>, metadata) 1658define <vscale x 8 x i1> @vfptosi_nxv8f64_nxv8i1(<vscale x 8 x double> %va) strictfp { 1659; CHECK-LABEL: vfptosi_nxv8f64_nxv8i1: 1660; CHECK: # %bb.0: 1661; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma 1662; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 1663; CHECK-NEXT: vand.vi v8, v16, 1 1664; CHECK-NEXT: vmsne.vi v0, v8, 0 1665; CHECK-NEXT: ret 1666 %evec = call <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict") 1667 ret <vscale x 8 x i1> %evec 1668} 1669 1670declare <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f64(<vscale x 8 x double>, metadata) 1671define <vscale x 8 x i1> @vfptoui_nxv8f64_nxv8i1(<vscale x 8 x double> %va) strictfp { 1672; CHECK-LABEL: vfptoui_nxv8f64_nxv8i1: 1673; CHECK: # %bb.0: 1674; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma 1675; 
CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 1676; CHECK-NEXT: vand.vi v8, v16, 1 1677; CHECK-NEXT: vmsne.vi v0, v8, 0 1678; CHECK-NEXT: ret 1679 %evec = call <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict") 1680 ret <vscale x 8 x i1> %evec 1681} 1682 1683declare <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f64(<vscale x 8 x double>, metadata) 1684define <vscale x 8 x i8> @vfptosi_nxv8f64_nxv8i8(<vscale x 8 x double> %va) strictfp { 1685; CHECK-LABEL: vfptosi_nxv8f64_nxv8i8: 1686; CHECK: # %bb.0: 1687; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma 1688; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 1689; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma 1690; CHECK-NEXT: vnsrl.wi v10, v16, 0 1691; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma 1692; CHECK-NEXT: vnsrl.wi v8, v10, 0 1693; CHECK-NEXT: ret 1694 %evec = call <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict") 1695 ret <vscale x 8 x i8> %evec 1696} 1697 1698declare <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f64(<vscale x 8 x double>, metadata) 1699define <vscale x 8 x i8> @vfptoui_nxv8f64_nxv8i8(<vscale x 8 x double> %va) strictfp { 1700; CHECK-LABEL: vfptoui_nxv8f64_nxv8i8: 1701; CHECK: # %bb.0: 1702; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma 1703; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 1704; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma 1705; CHECK-NEXT: vnsrl.wi v10, v16, 0 1706; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma 1707; CHECK-NEXT: vnsrl.wi v8, v10, 0 1708; CHECK-NEXT: ret 1709 %evec = call <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict") 1710 ret <vscale x 8 x i8> %evec 1711} 1712 1713declare <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f64(<vscale x 8 x double>, metadata) 1714define <vscale x 8 x i16> @vfptosi_nxv8f64_nxv8i16(<vscale x 8 x double> %va) strictfp { 1715; CHECK-LABEL: vfptosi_nxv8f64_nxv8i16: 1716; CHECK: # %bb.0: 1717; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma 1718; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 1719; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma 1720; CHECK-NEXT: vnsrl.wi v8, v16, 0 1721; CHECK-NEXT: ret 1722 %evec = call <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict") 1723 ret <vscale x 8 x i16> %evec 1724} 1725 1726declare <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f64(<vscale x 8 x double>, metadata) 1727define <vscale x 8 x i16> @vfptoui_nxv8f64_nxv8i16(<vscale x 8 x double> %va) strictfp { 1728; CHECK-LABEL: vfptoui_nxv8f64_nxv8i16: 1729; CHECK: # %bb.0: 1730; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma 1731; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 1732; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma 1733; CHECK-NEXT: vnsrl.wi v8, v16, 0 1734; CHECK-NEXT: ret 1735 %evec = call <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict") 1736 ret <vscale x 8 x i16> %evec 1737} 1738 1739declare <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f64(<vscale x 8 x double>, metadata) 1740define <vscale x 8 x i32> @vfptosi_nxv8f64_nxv8i32(<vscale x 8 x double> %va) strictfp { 1741; CHECK-LABEL: vfptosi_nxv8f64_nxv8i32: 1742; CHECK: # %bb.0: 1743; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma 1744; CHECK-NEXT: 
vfncvt.rtz.x.f.w v16, v8 1745; CHECK-NEXT: vmv.v.v v8, v16 1746; CHECK-NEXT: ret 1747 %evec = call <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict") 1748 ret <vscale x 8 x i32> %evec 1749} 1750 1751declare <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f64(<vscale x 8 x double>, metadata) 1752define <vscale x 8 x i32> @vfptoui_nxv8f64_nxv8i32(<vscale x 8 x double> %va) strictfp { 1753; CHECK-LABEL: vfptoui_nxv8f64_nxv8i32: 1754; CHECK: # %bb.0: 1755; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma 1756; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 1757; CHECK-NEXT: vmv.v.v v8, v16 1758; CHECK-NEXT: ret 1759 %evec = call <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict") 1760 ret <vscale x 8 x i32> %evec 1761} 1762 1763declare <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f64(<vscale x 8 x double>, metadata) 1764define <vscale x 8 x i64> @vfptosi_nxv8f64_nxv8i64(<vscale x 8 x double> %va) strictfp { 1765; CHECK-LABEL: vfptosi_nxv8f64_nxv8i64: 1766; CHECK: # %bb.0: 1767; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma 1768; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 1769; CHECK-NEXT: ret 1770 %evec = call <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict") 1771 ret <vscale x 8 x i64> %evec 1772} 1773 1774declare <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f64(<vscale x 8 x double>, metadata) 1775define <vscale x 8 x i64> @vfptoui_nxv8f64_nxv8i64(<vscale x 8 x double> %va) strictfp { 1776; CHECK-LABEL: vfptoui_nxv8f64_nxv8i64: 1777; CHECK: # %bb.0: 1778; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma 1779; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 1780; CHECK-NEXT: ret 1781 %evec = call <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict") 1782 ret <vscale x 8 x i64> %evec 1783} 1784