; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
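; mulhioverflow returns ((zext %a * zext %b) >> 64) + (%c & 1) as an i128.
; On i686 the i128 multiply is lowered to a __multi3 libcall and the result
; is returned indirectly through a hidden sret pointer (hence retl $4); on
; x86-64 a single mulq leaves the high half in %rdx and the sum is formed
; with leaq, with the upper 64 bits of the returned i128 zeroed.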
define i128 @mulhioverflow(i64 %a, i64 %b, i64 %c) nounwind {
; X32-LABEL: mulhioverflow:
; X32:       # BB#0:
; X32-NEXT:    pushl %ebp
; X32-NEXT:    movl %esp, %ebp
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    andl $-8, %esp
; X32-NEXT:    subl $16, %esp
; X32-NEXT:    movl 8(%ebp), %esi
; X32-NEXT:    movl 28(%ebp), %edi
; X32-NEXT:    movl %esp, %eax
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl 24(%ebp)
; X32-NEXT:    pushl 20(%ebp)
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl 16(%ebp)
; X32-NEXT:    pushl 12(%ebp)
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __multi3
; X32-NEXT:    addl $32, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    andl $1, %edi
; X32-NEXT:    addl {{[0-9]+}}(%esp), %edi
; X32-NEXT:    adcl $0, %eax
; X32-NEXT:    setb %cl
; X32-NEXT:    movzbl %cl, %ecx
; X32-NEXT:    movl %edi, (%esi)
; X32-NEXT:    movl %eax, 4(%esi)
; X32-NEXT:    movl %ecx, 8(%esi)
; X32-NEXT:    movl $0, 12(%esi)
; X32-NEXT:    movl %esi, %eax
; X32-NEXT:    leal -8(%ebp), %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X32-NEXT:    popl %ebp
; X32-NEXT:    retl $4
;
; X64-LABEL: mulhioverflow:
; X64:       # BB#0:
; X64-NEXT:    movq %rdx, %rcx
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    mulq %rsi
; X64-NEXT:    andl $1, %ecx
; X64-NEXT:    leaq (%rcx,%rdx), %rax
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    retq
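; The IR below widens %a and %b to i128, takes the high 64 bits of the
; product, and adds (%c & 1):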
  %1 = zext i64 %a to i128
  %2 = zext i64 %b to i128
  %3 = mul i128 %1, %2
  %4 = lshr i128 %3, 64
  %5 = and i64 %c, 1
  %6 = zext i64 %5 to i128
  %7 = add i128 %4, %6
  ret i128 %7
}