; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s

; This file tests the code generation for `llvm.experimental.constrained.round.*` on scalable vector types.
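;
; Explanatory summary (not part of the generated assertions): every test below
; checks the same strictfp lowering pattern for round(). NaN lanes are detected
; with vmfne (x != x) and any signaling NaNs are quieted by a masked vfadd of
; the value to itself; |x| is then compared against the element type's
; integral-magnitude threshold with vfabs/vmflt so that lanes which are already
; integral (or NaN/inf) are masked out; the remaining lanes are rounded by
; converting to integer and back (vfcvt.x.f.v / vfcvt.f.x.v) with the dynamic
; rounding mode temporarily set to RMM, i.e. round-to-nearest-ties-away
; (fsrmi 4 / fsrm); finally vfsgnj restores the original sign so negative
; values and -0.0 keep their sign bit.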

define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) strictfp {
; CHECK-LABEL: round_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x half> @llvm.experimental.constrained.round.nxv1f16(<vscale x 1 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.experimental.constrained.round.nxv1f16(<vscale x 1 x half>, metadata)

define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) strictfp {
; CHECK-LABEL: round_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x half> @llvm.experimental.constrained.round.nxv2f16(<vscale x 2 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.experimental.constrained.round.nxv2f16(<vscale x 2 x half>, metadata)

define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) strictfp {
; CHECK-LABEL: round_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x half> @llvm.experimental.constrained.round.nxv4f16(<vscale x 4 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.experimental.constrained.round.nxv4f16(<vscale x 4 x half>, metadata)

define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) strictfp {
; CHECK-LABEL: round_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v10, v8
; CHECK-NEXT:    vmflt.vf v0, v10, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x half> @llvm.experimental.constrained.round.nxv8f16(<vscale x 8 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.experimental.constrained.round.nxv8f16(<vscale x 8 x half>, metadata)

define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) strictfp {
; CHECK-LABEL: round_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v12, v8
; CHECK-NEXT:    vmflt.vf v0, v12, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 16 x half> @llvm.experimental.constrained.round.nxv16f16(<vscale x 16 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.experimental.constrained.round.nxv16f16(<vscale x 16 x half>, metadata)

define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) strictfp {
; CHECK-LABEL: round_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v16, v8
; CHECK-NEXT:    vmflt.vf v0, v16, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 32 x half> @llvm.experimental.constrained.round.nxv32f16(<vscale x 32 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %a
}
declare <vscale x 32 x half> @llvm.experimental.constrained.round.nxv32f16(<vscale x 32 x half>, metadata)

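; Note (explanatory, not generated): for the f32 tests below the threshold is
; materialized inline rather than loaded from the constant pool. lui a0, 307200
; produces 0x4b000000, which fmv.w.x reinterprets as 2^23 (8388608.0f), the
; smallest single-precision magnitude at which every value is already integral.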
define <vscale x 1 x float> @round_nxv1f32(<vscale x 1 x float> %x) strictfp {
; CHECK-LABEL: round_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x float> @llvm.experimental.constrained.round.nxv1f32(<vscale x 1 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.experimental.constrained.round.nxv1f32(<vscale x 1 x float>, metadata)

define <vscale x 2 x float> @round_nxv2f32(<vscale x 2 x float> %x) strictfp {
; CHECK-LABEL: round_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x float> @llvm.experimental.constrained.round.nxv2f32(<vscale x 2 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.experimental.constrained.round.nxv2f32(<vscale x 2 x float>, metadata)

define <vscale x 4 x float> @round_nxv4f32(<vscale x 4 x float> %x) strictfp {
; CHECK-LABEL: round_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vfabs.v v10, v8
; CHECK-NEXT:    vmflt.vf v0, v10, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x float> @llvm.experimental.constrained.round.nxv4f32(<vscale x 4 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.experimental.constrained.round.nxv4f32(<vscale x 4 x float>, metadata)

define <vscale x 8 x float> @round_nxv8f32(<vscale x 8 x float> %x) strictfp {
; CHECK-LABEL: round_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vfabs.v v12, v8
; CHECK-NEXT:    vmflt.vf v0, v12, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x float> @llvm.experimental.constrained.round.nxv8f32(<vscale x 8 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.experimental.constrained.round.nxv8f32(<vscale x 8 x float>, metadata)

define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) strictfp {
; CHECK-LABEL: round_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vfabs.v v16, v8
; CHECK-NEXT:    vmflt.vf v0, v16, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 16 x float> @llvm.experimental.constrained.round.nxv16f32(<vscale x 16 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.experimental.constrained.round.nxv16f32(<vscale x 16 x float>, metadata)

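; Note (explanatory, not generated): the f64 tests return to the constant-pool
; form, with fld loading the threshold from .LCPI*_0. The pool contents are not
; part of the generated CHECK lines, but the constant is expected to be 2^52,
; the double-precision analogue of the f32 bound above.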
define <vscale x 1 x double> @round_nxv1f64(<vscale x 1 x double> %x) strictfp {
; CHECK-LABEL: round_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.experimental.constrained.round.nxv1f64(<vscale x 1 x double> %x, metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.experimental.constrained.round.nxv1f64(<vscale x 1 x double>, metadata)

define <vscale x 2 x double> @round_nxv2f64(<vscale x 2 x double> %x) strictfp {
; CHECK-LABEL: round_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v10, v8
; CHECK-NEXT:    vmflt.vf v0, v10, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x double> @llvm.experimental.constrained.round.nxv2f64(<vscale x 2 x double> %x, metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.experimental.constrained.round.nxv2f64(<vscale x 2 x double>, metadata)

define <vscale x 4 x double> @round_nxv4f64(<vscale x 4 x double> %x) strictfp {
; CHECK-LABEL: round_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v12, v8
; CHECK-NEXT:    vmflt.vf v0, v12, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.experimental.constrained.round.nxv4f64(<vscale x 4 x double> %x, metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.experimental.constrained.round.nxv4f64(<vscale x 4 x double>, metadata)

define <vscale x 8 x double> @round_nxv8f64(<vscale x 8 x double> %x) strictfp {
; CHECK-LABEL: round_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI14_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v16, v8
; CHECK-NEXT:    vmflt.vf v0, v16, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x double> @llvm.experimental.constrained.round.nxv8f64(<vscale x 8 x double> %x, metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.experimental.constrained.round.nxv8f64(<vscale x 8 x double>, metadata)