; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16 --fp-contract=fast --enable-unsafe-fp-math | FileCheck %s

; These tests check that a complex multiply (llvm.x86.avx512fp16.mask.vfmul.cph.*)
; whose result is added (fadd fast) to an accumulator is combined into a single
; fused complex multiply-add instruction (vfmaddcph) at 512-, 256-, and 128-bit
; vector widths. test4 checks the combine also fires when the fadd operands are
; in the opposite order.

define dso_local <32 x half> @test1(<32 x half> %acc.coerce, <32 x half> %lhs.coerce, <32 x half> %rhs.coerce) {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vfmaddcph %zmm2, %zmm1, %zmm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <32 x half> %lhs.coerce to <16 x float>
  %1 = bitcast <32 x half> %rhs.coerce to <16 x float>
  %2 = tail call fast <16 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.512(<16 x float> %0, <16 x float> %1, <16 x float> zeroinitializer, i16 -1, i32 4)
  %3 = bitcast <16 x float> %2 to <32 x half>
  %add.i.i = fadd fast <32 x half> %3, %acc.coerce
  ret <32 x half> %add.i.i
}

define dso_local <16 x half> @test2(<16 x half> %acc.coerce, <16 x half> %lhs.coerce, <16 x half> %rhs.coerce) {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vfmaddcph %ymm2, %ymm1, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <16 x half> %lhs.coerce to <8 x float>
  %1 = bitcast <16 x half> %rhs.coerce to <8 x float>
  %2 = tail call fast <8 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.256(<8 x float> %0, <8 x float> %1, <8 x float> zeroinitializer, i8 -1)
  %3 = bitcast <8 x float> %2 to <16 x half>
  %add.i.i = fadd fast <16 x half> %3, %acc.coerce
  ret <16 x half> %add.i.i
}

define dso_local <8 x half> @test3(<8 x half> %acc.coerce, <8 x half> %lhs.coerce, <8 x half> %rhs.coerce) {
; CHECK-LABEL: test3:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vfmaddcph %xmm2, %xmm1, %xmm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <8 x half> %lhs.coerce to <4 x float>
  %1 = bitcast <8 x half> %rhs.coerce to <4 x float>
  %2 = tail call fast <4 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.128(<4 x float> %0, <4 x float> %1, <4 x float> zeroinitializer, i8 -1)
  %3 = bitcast <4 x float> %2 to <8 x half>
  %add.i.i = fadd fast <8 x half> %3, %acc.coerce
  ret <8 x half> %add.i.i
}

; Same as test3, but with the fadd operands commuted (acc + mul instead of
; mul + acc); the combine must still produce vfmaddcph.
define dso_local <8 x half> @test4(<8 x half> %acc.coerce, <8 x half> %lhs.coerce, <8 x half> %rhs.coerce) {
; CHECK-LABEL: test4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vfmaddcph %xmm2, %xmm1, %xmm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <8 x half> %lhs.coerce to <4 x float>
  %1 = bitcast <8 x half> %rhs.coerce to <4 x float>
  %2 = tail call fast <4 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.128(<4 x float> %0, <4 x float> %1, <4 x float> zeroinitializer, i8 -1)
  %3 = bitcast <4 x float> %2 to <8 x half>
  %add.i.i = fadd fast <8 x half> %acc.coerce, %3
  ret <8 x half> %add.i.i
}

declare <16 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.512(<16 x float>, <16 x float>, <16 x float>, i16, i32 immarg)
declare <8 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.256(<8 x float>, <8 x float>, <8 x float>, i8)
declare <4 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.128(<4 x float>, <4 x float>, <4 x float>, i8)