; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512fp16,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,X64

; Strict (constrained) integer-to-half conversions that are legal with
; AVX512-FP16 + AVX512VL at 256-bit vector width.

declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)

; i1 elements are sign-extended (zext+shl+sra) to i16 lanes before vcvtw2ph.
define <16 x half> @sitofp_v16i1_v16f16(<16 x i1> %x) #0 {
; CHECK-LABEL: sitofp_v16i1_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; CHECK-NEXT:    vpsllw $15, %ymm0, %ymm0
; CHECK-NEXT:    vpsraw $15, %ymm0, %ymm0
; CHECK-NEXT:    vcvtw2ph %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1> %x,
                                metadata !"round.dynamic",
                                metadata !"fpexcept.strict") #0
  ret <16 x half> %result
}

; X86 and X64 differ only in the addressing of the mask constant (RIP-relative
; on X64), so this test needs per-target prefixes.
define <16 x half> @uitofp_v16i1_v16f16(<16 x i1> %x) #0 {
; X86-LABEL: uitofp_v16i1_v16f16:
; X86:       # %bb.0:
; X86-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; X86-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X86-NEXT:    vcvtw2ph %ymm0, %ymm0
; X86-NEXT:    retl
;
; X64-LABEL: uitofp_v16i1_v16f16:
; X64:       # %bb.0:
; X64-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; X64-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X64-NEXT:    vcvtw2ph %ymm0, %ymm0
; X64-NEXT:    retq
  %result = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1> %x,
                                metadata !"round.dynamic",
                                metadata !"fpexcept.strict") #0
  ret <16 x half> %result
}

define <16 x half> @sitofp_v16i8_v16f16(<16 x i8> %x) #0 {
; CHECK-LABEL: sitofp_v16i8_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovsxbw %xmm0, %ymm0
; CHECK-NEXT:    vcvtw2ph %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8> %x,
                                metadata !"round.dynamic",
                                metadata !"fpexcept.strict") #0
  ret <16 x half> %result
}

define <16 x half> @uitofp_v16i8_v16f16(<16 x i8> %x) #0 {
; CHECK-LABEL: uitofp_v16i8_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; CHECK-NEXT:    vcvtw2ph %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8> %x,
                                metadata !"round.dynamic",
                                metadata !"fpexcept.strict") #0
  ret <16 x half> %result
}

define <16 x half> @sitofp_v16i16_v16f16(<16 x i16> %x) #0 {
; CHECK-LABEL: sitofp_v16i16_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtw2ph %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16> %x,
                                metadata !"round.dynamic",
                                metadata !"fpexcept.strict") #0
  ret <16 x half> %result
}

define <16 x half> @uitofp_v16i16_v16f16(<16 x i16> %x) #0 {
; CHECK-LABEL: uitofp_v16i16_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtuw2ph %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16> %x,
                                metadata !"round.dynamic",
                                metadata !"fpexcept.strict") #0
  ret <16 x half> %result
}

define <8 x half> @sitofp_v8i32_v8f16(<8 x i32> %x) #0 {
; CHECK-LABEL: sitofp_v8i32_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtdq2ph %ymm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32> %x,
                                metadata !"round.dynamic",
                                metadata !"fpexcept.strict") #0
  ret <8 x half> %result
}

define <8 x half> @uitofp_v8i32_v8f16(<8 x i32> %x) #0 {
; CHECK-LABEL: uitofp_v8i32_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtudq2ph %ymm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32> %x,
                                metadata !"round.dynamic",
                                metadata !"fpexcept.strict") #0
  ret <8 x half> %result
}

define <4 x half> @sitofp_v4i64_v4f16(<4 x i64> %x) #0 {
; CHECK-LABEL: sitofp_v4i64_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtqq2ph %ymm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64> %x,
                                metadata !"round.dynamic",
                                metadata !"fpexcept.strict") #0
  ret <4 x half> %result
}

define <4 x half> @uitofp_v4i64_v4f16(<4 x i64> %x) #0 {
; CHECK-LABEL: uitofp_v4i64_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtuqq2ph %ymm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64> %x,
                                metadata !"round.dynamic",
                                metadata !"fpexcept.strict") #0
  ret <4 x half> %result
}

; The v8i64 cases use attribute group #1, which caps the preferred/legal
; vector width at 256 bits, so the v8i64 input is split into two ymm halves.
define <8 x half> @sitofp_v8i64_v8f16(<8 x i64> %x) #1 {
; CHECK-LABEL: sitofp_v8i64_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtqq2ph %ymm1, %xmm1
; CHECK-NEXT:    vcvtqq2ph %ymm0, %xmm0
; CHECK-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64> %x,
                                metadata !"round.dynamic",
                                metadata !"fpexcept.strict") #0
  ret <8 x half> %result
}

define <8 x half> @uitofp_v8i64_v8f16(<8 x i64> %x) #1 {
; CHECK-LABEL: uitofp_v8i64_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtuqq2ph %ymm1, %xmm1
; CHECK-NEXT:    vcvtuqq2ph %ymm0, %xmm0
; CHECK-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret{{[l|q]}}
  %result = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64> %x,
                                metadata !"round.dynamic",
                                metadata !"fpexcept.strict") #0
  ret <8 x half> %result
}

attributes #0 = { strictfp }
attributes #1 = { strictfp "min-legal-vector-width"="256" "prefer-vector-width"="256" }