; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O3 | FileCheck %s

; Tests that constrained (strictfp) FP16 intrinsics on 256-bit vectors lower to
; the expected single AVX512-FP16 VL instructions on both i686 and x86_64.
; All calls use !"round.dynamic" / !"fpexcept.strict" metadata and every
; function carries the strictfp attribute (#0) so no FP transforms may relax them.

declare <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.sqrt.v16f16(<16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f16(<4 x half>, metadata)
declare <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half>, metadata)
declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata)
declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half>, metadata)
declare <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half>, metadata)
declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata)
declare <16 x half> @llvm.experimental.constrained.rint.v16f16(<16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(<16 x half>, metadata, metadata)

; Strict fadd -> vaddph ymm.
define <16 x half> @f2(<16 x half> %a, <16 x half> %b) #0 {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddph %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half> %a, <16 x half> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <16 x half> %ret
}

; Strict fsub -> vsubph ymm.
define <16 x half> @f4(<16 x half> %a, <16 x half> %b) #0 {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsubph %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half> %a, <16 x half> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <16 x half> %ret
}

; Strict fmul -> vmulph ymm.
define <16 x half> @f6(<16 x half> %a, <16 x half> %b) #0 {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmulph %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half> %a, <16 x half> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <16 x half> %ret
}

; Strict fdiv -> vdivph ymm.
define <16 x half> @f8(<16 x half> %a, <16 x half> %b) #0 {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivph %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half> %a, <16 x half> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <16 x half> %ret
}


; Strict sqrt -> vsqrtph ymm.
define <16 x half> @f10(<16 x half> %a) #0 {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsqrtph %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x half> @llvm.experimental.constrained.sqrt.v16f16(
                        <16 x half> %a,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret <16 x half > %ret
}

; Strict fpext v4f16 -> v4f64: vcvtph2pd widens xmm source to ymm result.
define <4 x double> @f11(<4 x half> %a) #0 {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtph2pd %xmm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f16(
                        <4 x half> %a,
                        metadata !"fpexcept.strict") #0
  ret <4 x double> %ret
}

; Strict fptrunc v4f64 -> v4f16: vcvtpd2ph narrows to xmm, then vzeroupper
; because the ymm upper halves are no longer live before returning.
define <4 x half> @f12(<4 x double> %a) #0 {
; CHECK-LABEL: f12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtpd2ph %ymm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(
                        <4 x double> %a,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret <4 x half> %ret
}

; Strict fma -> single fused vfmadd213ph (213 form: ymm0 = ymm1*ymm0 + ymm2).
define <16 x half> @f13(<16 x half> %a, <16 x half> %b, <16 x half> %c) #0 {
; CHECK-LABEL: f13:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vfmadd213ph %ymm2, %ymm1, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c,
                                                                    metadata !"round.dynamic",
                                                                    metadata !"fpexcept.strict") #0
  ret <16 x half> %res
}

; Strict fpext v8f16 -> v8f32: vcvtph2psx widens xmm source to ymm result.
define <8 x float> @f14(<8 x half> %a) #0 {
; CHECK-LABEL: f14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtph2psx %xmm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(
                        <8 x half> %a,
                        metadata !"fpexcept.strict") #0
  ret <8 x float> %ret
}

; Strict fptrunc v8f32 -> v8f16: vcvtps2phx narrows to xmm, plus vzeroupper.
define <8 x half> @f15(<8 x float> %a) #0 {
; CHECK-LABEL: f15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtps2phx %ymm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(
                        <8 x float> %a,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret <8 x half> %ret
}

; Rounding-family intrinsics all lower to vrndscaleph with an imm8 selecting
; the rounding mode (and, where applicable, precision-exception suppression).

; ceil -> vrndscaleph imm 10 (round toward +inf, suppress precision exception).
define <16 x half> @fceilv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: fceilv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $10, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x half> @llvm.experimental.constrained.ceil.v16f16(
                        <16 x half> %f, metadata !"fpexcept.strict") #0
  ret <16 x half> %res
}

; floor -> vrndscaleph imm 9 (round toward -inf, suppress precision exception).
define <16 x half> @ffloorv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: ffloorv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $9, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x half> @llvm.experimental.constrained.floor.v16f16(
                        <16 x half> %f, metadata !"fpexcept.strict") #0
  ret <16 x half> %res
}


; trunc -> vrndscaleph imm 11 (round toward zero, suppress precision exception).
define <16 x half> @ftruncv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: ftruncv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $11, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x half> @llvm.experimental.constrained.trunc.v16f16(
                        <16 x half> %f, metadata !"fpexcept.strict") #0
  ret <16 x half> %res
}

; rint -> vrndscaleph imm 4 (use current MXCSR rounding mode; precision
; exception NOT suppressed, since rint may raise inexact).
define <16 x half> @frintv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: frintv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $4, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x half> @llvm.experimental.constrained.rint.v16f16(
                        <16 x half> %f,
                        metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <16 x half> %res
}

; nearbyint -> vrndscaleph imm 12 (current rounding mode, precision exception
; suppressed — nearbyint must not raise inexact, unlike rint).
define <16 x half> @fnearbyintv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: fnearbyintv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $12, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(
                        <16 x half> %f,
                        metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <16 x half> %res
}

attributes #0 = { strictfp }