xref: /llvm-project/llvm/test/CodeGen/X86/pr86305.ll (revision 76569025dd8b026b3309dedbcaf877d16eace805)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
2; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16 | FileCheck %s
3
; Scalar bfloat add. With only avx512bf16 (no full bf16 arithmetic), each
; bfloat operand is promoted to f32 by placing its 16-bit payload in the
; high half of a 32-bit lane (movzwl + shll $16), the add is done with
; vaddss, and the result is rounded back to bfloat via the __truncsfbf2
; libcall; the caller-visible pointer %pc is kept live across the call in
; the callee-saved %rbx.
4define void @add(ptr %pa, ptr %pb, ptr %pc) nounwind {
5; CHECK-LABEL: add:
6; CHECK:       # %bb.0:
7; CHECK-NEXT:    pushq %rbx
8; CHECK-NEXT:    movq %rdx, %rbx
9; CHECK-NEXT:    movzwl (%rsi), %eax
10; CHECK-NEXT:    shll $16, %eax
11; CHECK-NEXT:    vmovd %eax, %xmm0
12; CHECK-NEXT:    movzwl (%rdi), %eax
13; CHECK-NEXT:    shll $16, %eax
14; CHECK-NEXT:    vmovd %eax, %xmm1
15; CHECK-NEXT:    vaddss %xmm0, %xmm1, %xmm0
16; CHECK-NEXT:    callq __truncsfbf2@PLT
17; CHECK-NEXT:    vpextrw $0, %xmm0, (%rbx)
18; CHECK-NEXT:    popq %rbx
19; CHECK-NEXT:    retq
; IR under test: load two scalar bfloats, add them, store the result.
20  %a = load bfloat, ptr %pa
21  %b = load bfloat, ptr %pb
22  %add = fadd bfloat %a, %b
23  store bfloat %add, ptr %pc
24  ret void
25}
26
; Vector fptrunc <4 x float> -> <4 x bfloat>. Regression test reduced from
; llvm.org PR86305 (per the filename). The lowering scalarizes: each f32
; lane is extracted (vmovshdup / vpshufd / vpermilpd shuffles folded from
; the stack spill), converted with a separate __truncsfbf2 libcall, the
; partial results are spilled/reloaded around the calls, and the bf16
; lanes are reassembled with vpinsrw.
; NOTE(review): only lanes 0-3 of the xmm return value carry the
; <4 x bfloat> result; the repeated %ebx inserts into lanes 4-7 appear to
; fill don't-care upper lanes — confirm against the PR if this output is
; updated.
27define <4 x bfloat> @fptrunc_v4f32(<4 x float> %a) nounwind {
28; CHECK-LABEL: fptrunc_v4f32:
29; CHECK:       # %bb.0:
30; CHECK-NEXT:    pushq %rbp
31; CHECK-NEXT:    pushq %r14
32; CHECK-NEXT:    pushq %rbx
33; CHECK-NEXT:    subq $64, %rsp
34; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
35; CHECK-NEXT:    callq __truncsfbf2@PLT
36; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
37; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
38; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
39; CHECK-NEXT:    callq __truncsfbf2@PLT
40; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
41; CHECK-NEXT:    vpshufd $255, (%rsp), %xmm0 # 16-byte Folded Reload
42; CHECK-NEXT:    # xmm0 = mem[3,3,3,3]
43; CHECK-NEXT:    callq __truncsfbf2@PLT
44; CHECK-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
45; CHECK-NEXT:    callq __truncsfbf2@PLT
46; CHECK-NEXT:    vpextrw $0, %xmm0, %ebx
47; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
48; CHECK-NEXT:    vpextrw $0, %xmm0, %ebp
49; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
50; CHECK-NEXT:    vpextrw $0, %xmm0, %r14d
51; CHECK-NEXT:    vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
52; CHECK-NEXT:    # xmm0 = mem[1,0]
53; CHECK-NEXT:    callq __truncsfbf2@PLT
54; CHECK-NEXT:    vpextrw $0, %xmm0, %eax
55; CHECK-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
56; CHECK-NEXT:    vpinsrw $1, %r14d, %xmm0, %xmm0
57; CHECK-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0
58; CHECK-NEXT:    vpinsrw $3, %ebp, %xmm0, %xmm0
59; CHECK-NEXT:    vpinsrw $4, %ebx, %xmm0, %xmm0
60; CHECK-NEXT:    vpinsrw $5, %ebx, %xmm0, %xmm0
61; CHECK-NEXT:    vpinsrw $6, %ebx, %xmm0, %xmm0
62; CHECK-NEXT:    vpinsrw $7, %ebx, %xmm0, %xmm0
63; CHECK-NEXT:    addq $64, %rsp
64; CHECK-NEXT:    popq %rbx
65; CHECK-NEXT:    popq %r14
66; CHECK-NEXT:    popq %rbp
67; CHECK-NEXT:    retq
; IR under test: a single vector truncation from f32 to bf16 lanes.
68  %b = fptrunc <4 x float> %a to <4 x bfloat>
69  ret <4 x bfloat> %b
70}
71