; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64

; Returns the high 64 bits of the full 64x64->128-bit product of %a and %b,
; plus the low bit of %c, widened to i128.
define i128 @mulhioverflow(i64 %a, i64 %b, i64 %c) nounwind {
; X32-LABEL: mulhioverflow:
; X32:       # %bb.0:
; X32-NEXT:    pushl %ebp
; X32-NEXT:    pushl %ebx
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl %esi, %eax
; X32-NEXT:    mull %edi
; X32-NEXT:    movl %edx, %ebp
; X32-NEXT:    movl %ebx, %eax
; X32-NEXT:    mull %edi
; X32-NEXT:    movl %edx, %edi
; X32-NEXT:    movl %eax, %ebx
; X32-NEXT:    addl %ebp, %ebx
; X32-NEXT:    adcl $0, %edi
; X32-NEXT:    movl %esi, %eax
; X32-NEXT:    mull %ecx
; X32-NEXT:    movl %edx, %esi
; X32-NEXT:    addl %ebx, %eax
; X32-NEXT:    adcl %edi, %esi
; X32-NEXT:    setb %bl
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    mull %ecx
; X32-NEXT:    addl %esi, %eax
; X32-NEXT:    movzbl %bl, %ecx
; X32-NEXT:    adcl %ecx, %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    andl $1, %ecx
; X32-NEXT:    addl %eax, %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl %ecx, (%eax)
; X32-NEXT:    adcl $0, %edx
; X32-NEXT:    movl %edx, 4(%eax)
; X32-NEXT:    setb %cl
; X32-NEXT:    movzbl %cl, %ecx
; X32-NEXT:    movl %ecx, 8(%eax)
; X32-NEXT:    movl $0, 12(%eax)
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X32-NEXT:    popl %ebx
; X32-NEXT:    popl %ebp
; X32-NEXT:    retl $4
;
; X64-LABEL: mulhioverflow:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdx, %rcx
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    mulq %rsi
; X64-NEXT:    andl $1, %ecx
; X64-NEXT:    addq %rdx, %rcx
; X64-NEXT:    movq %rcx, %rax
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    retq
  %1 = zext i64 %a to i128
  %2 = zext i64 %b to i128
  %3 = mul i128 %1, %2
  %4 = lshr i128 %3, 64
  %5 = and i64 %c, 1
  %6 = zext i64 %5 to i128
  %7 = add i128 %4, %6
  ret i128 %7
}