; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+zvfbfmin -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s

; Tests lowering of the fpext SDNode on scalable vectors to RVV widening
; conversions: a single vfwcvt.f.f.v (or vfwcvtbf16.f.f.v for bfloat
; sources) for one widening step, and two chained widening converts for
; f16/bf16 -> f64. All four RUN lines (+zvfh and +zvfhmin, rv32 and rv64)
; are expected to produce identical output and share the CHECK prefix.

; --- f16 -> f32 / f64 ---

define <vscale x 1 x float> @vfpext_nxv1f16_nxv1f32(<vscale x 1 x half> %va) {
;
; CHECK-LABEL: vfpext_nxv1f16_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 1 x half> %va to <vscale x 1 x float>
  ret <vscale x 1 x float> %evec
}

define <vscale x 1 x double> @vfpext_nxv1f16_nxv1f64(<vscale x 1 x half> %va) {
;
; CHECK-LABEL: vfpext_nxv1f16_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 1 x half> %va to <vscale x 1 x double>
  ret <vscale x 1 x double> %evec
}

define <vscale x 2 x float> @vfpext_nxv2f16_nxv2f32(<vscale x 2 x half> %va) {
;
; CHECK-LABEL: vfpext_nxv2f16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 2 x half> %va to <vscale x 2 x float>
  ret <vscale x 2 x float> %evec
}

define <vscale x 2 x double> @vfpext_nxv2f16_nxv2f64(<vscale x 2 x half> %va) {
;
; CHECK-LABEL: vfpext_nxv2f16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 2 x half> %va to <vscale x 2 x double>
  ret <vscale x 2 x double> %evec
}

define <vscale x 4 x float> @vfpext_nxv4f16_nxv4f32(<vscale x 4 x half> %va) {
;
; CHECK-LABEL: vfpext_nxv4f16_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 4 x half> %va to <vscale x 4 x float>
  ret <vscale x 4 x float> %evec
}

define <vscale x 4 x double> @vfpext_nxv4f16_nxv4f64(<vscale x 4 x half> %va) {
;
; CHECK-LABEL: vfpext_nxv4f16_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 4 x half> %va to <vscale x 4 x double>
  ret <vscale x 4 x double> %evec
}

define <vscale x 8 x float> @vfpext_nxv8f16_nxv8f32(<vscale x 8 x half> %va) {
;
; CHECK-LABEL: vfpext_nxv8f16_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 8 x half> %va to <vscale x 8 x float>
  ret <vscale x 8 x float> %evec
}

define <vscale x 8 x double> @vfpext_nxv8f16_nxv8f64(<vscale x 8 x half> %va) {
;
; CHECK-LABEL: vfpext_nxv8f16_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 8 x half> %va to <vscale x 8 x double>
  ret <vscale x 8 x double> %evec
}

define <vscale x 16 x float> @vfpext_nxv16f16_nxv16f32(<vscale x 16 x half> %va) {
;
; CHECK-LABEL: vfpext_nxv16f16_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 16 x half> %va to <vscale x 16 x float>
  ret <vscale x 16 x float> %evec
}

; --- f32 -> f64 ---

define <vscale x 1 x double> @vfpext_nxv1f32_nxv1f64(<vscale x 1 x float> %va) {
;
; CHECK-LABEL: vfpext_nxv1f32_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 1 x float> %va to <vscale x 1 x double>
  ret <vscale x 1 x double> %evec
}

define <vscale x 2 x double> @vfpext_nxv2f32_nxv2f64(<vscale x 2 x float> %va) {
;
; CHECK-LABEL: vfpext_nxv2f32_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 2 x float> %va to <vscale x 2 x double>
  ret <vscale x 2 x double> %evec
}

define <vscale x 4 x double> @vfpext_nxv4f32_nxv4f64(<vscale x 4 x float> %va) {
;
; CHECK-LABEL: vfpext_nxv4f32_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 4 x float> %va to <vscale x 4 x double>
  ret <vscale x 4 x double> %evec
}

define <vscale x 8 x double> @vfpext_nxv8f32_nxv8f64(<vscale x 8 x float> %va) {
;
; CHECK-LABEL: vfpext_nxv8f32_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 8 x float> %va to <vscale x 8 x double>
  ret <vscale x 8 x double> %evec
}

; --- bf16 -> f32 / f64 (uses vfwcvtbf16.f.f.v from zvfbfmin for the first step) ---

define <vscale x 1 x float> @vfpext_nxv1bf16_nxv1f32(<vscale x 1 x bfloat> %va) {
;
; CHECK-LABEL: vfpext_nxv1bf16_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 1 x bfloat> %va to <vscale x 1 x float>
  ret <vscale x 1 x float> %evec
}

define <vscale x 1 x double> @vfpext_nxv1bf16_nxv1f64(<vscale x 1 x bfloat> %va) {
;
; CHECK-LABEL: vfpext_nxv1bf16_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 1 x bfloat> %va to <vscale x 1 x double>
  ret <vscale x 1 x double> %evec
}

define <vscale x 2 x float> @vfpext_nxv2bf16_nxv2f32(<vscale x 2 x bfloat> %va) {
;
; CHECK-LABEL: vfpext_nxv2bf16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 2 x bfloat> %va to <vscale x 2 x float>
  ret <vscale x 2 x float> %evec
}

define <vscale x 2 x double> @vfpext_nxv2bf16_nxv2f64(<vscale x 2 x bfloat> %va) {
;
; CHECK-LABEL: vfpext_nxv2bf16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 2 x bfloat> %va to <vscale x 2 x double>
  ret <vscale x 2 x double> %evec
}

define <vscale x 4 x float> @vfpext_nxv4bf16_nxv4f32(<vscale x 4 x bfloat> %va) {
;
; CHECK-LABEL: vfpext_nxv4bf16_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 4 x bfloat> %va to <vscale x 4 x float>
  ret <vscale x 4 x float> %evec
}

define <vscale x 4 x double> @vfpext_nxv4bf16_nxv4f64(<vscale x 4 x bfloat> %va) {
;
; CHECK-LABEL: vfpext_nxv4bf16_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 4 x bfloat> %va to <vscale x 4 x double>
  ret <vscale x 4 x double> %evec
}

define <vscale x 8 x float> @vfpext_nxv8bf16_nxv8f32(<vscale x 8 x bfloat> %va) {
;
; CHECK-LABEL: vfpext_nxv8bf16_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 8 x bfloat> %va to <vscale x 8 x float>
  ret <vscale x 8 x float> %evec
}

define <vscale x 8 x double> @vfpext_nxv8bf16_nxv8f64(<vscale x 8 x bfloat> %va) {
;
; CHECK-LABEL: vfpext_nxv8bf16_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 8 x bfloat> %va to <vscale x 8 x double>
  ret <vscale x 8 x double> %evec
}

define <vscale x 16 x float> @vfpext_nxv16bf16_nxv16f32(<vscale x 16 x bfloat> %va) {
;
; CHECK-LABEL: vfpext_nxv16bf16_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = fpext <vscale x 16 x bfloat> %va to <vscale x 16 x float>
  ret <vscale x 16 x float> %evec
}