; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64

declare i4 @llvm.uadd.sat.i4(i4, i4)
declare i8 @llvm.uadd.sat.i8(i8, i8)
declare i16 @llvm.uadd.sat.i16(i16, i16)
declare i32 @llvm.uadd.sat.i32(i32, i32)
declare i64 @llvm.uadd.sat.i64(i64, i64)
declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)

define i32 @func(i32 %x, i32 %y) nounwind {
; X86-LABEL: func:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $-1, %eax
; X86-NEXT:    cmovael %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: func:
; X64:       # %bb.0:
; X64-NEXT:    addl %esi, %edi
; X64-NEXT:    movl $-1, %eax
; X64-NEXT:    cmovael %edi, %eax
; X64-NEXT:    retq
  %tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)
  ret i32 %tmp
}

define i64 @func2(i64 %x, i64 %y) nounwind {
; X86-LABEL: func2:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    cmovbl %ecx, %edx
; X86-NEXT:    cmovbl %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: func2:
; X64:       # %bb.0:
; X64-NEXT:    addq %rsi, %rdi
; X64-NEXT:    movq $-1, %rax
; X64-NEXT:    cmovaeq %rdi, %rax
; X64-NEXT:    retq
  %tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %y)
  ret i64 %tmp
}

define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
; X86-LABEL: func16:
; X86:       # %bb.0:
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    addw {{[0-9]+}}(%esp), %cx
; X86-NEXT:    movl $65535, %eax # imm = 0xFFFF
; X86-NEXT:    cmovael %ecx, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: func16:
; X64:       # %bb.0:
; X64-NEXT:    addw %si, %di
; X64-NEXT:    movl $65535, %eax # imm = 0xFFFF
; X64-NEXT:    cmovael %edi, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
  %tmp = call i16 @llvm.uadd.sat.i16(i16 %x, i16 %y)
  ret i16 %tmp
}

define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
; X86-LABEL: func8:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addb {{[0-9]+}}(%esp), %al
; X86-NEXT:    movzbl %al, %ecx
; X86-NEXT:    movl $255, %eax
; X86-NEXT:    cmovael %ecx, %eax
; X86-NEXT:    # kill: def $al killed $al killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: func8:
; X64:       # %bb.0:
; X64-NEXT:    addb %sil, %dil
; X64-NEXT:    movzbl %dil, %ecx
; X64-NEXT:    movl $255, %eax
; X64-NEXT:    cmovael %ecx, %eax
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %tmp = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %y)
  ret i8 %tmp
}

define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
; X86-LABEL: func3:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addb {{[0-9]+}}(%esp), %al
; X86-NEXT:    movzbl %al, %ecx
; X86-NEXT:    cmpb $15, %al
; X86-NEXT:    movl $15, %eax
; X86-NEXT:    cmovbl %ecx, %eax
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    retl
;
; X64-LABEL: func3:
; X64:       # %bb.0:
; X64-NEXT:    addb %sil, %dil
; X64-NEXT:    movzbl %dil, %eax
; X64-NEXT:    cmpb $15, %al
; X64-NEXT:    movl $15, %ecx
; X64-NEXT:    cmovbl %eax, %ecx
; X64-NEXT:    movzbl %cl, %eax
; X64-NEXT:    retq
  %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %y)
  ret i4 %tmp
}

define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-LABEL: vec:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $-1, %ebx
; X86-NEXT:    cmovbl %ebx, %ecx
; X86-NEXT:    addl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    cmovbl %ebx, %edx
; X86-NEXT:    addl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    cmovbl %ebx, %esi
; X86-NEXT:    addl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    cmovbl %ebx, %edi
; X86-NEXT:    movl %edi, 12(%eax)
; X86-NEXT:    movl %esi, 8(%eax)
; X86-NEXT:    movl %edx, 4(%eax)
; X86-NEXT:    movl %ecx, (%eax)
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl $4
;
; X64-LABEL: vec:
; X64:       # %bb.0:
; X64-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X64-NEXT:    movdqa %xmm0, %xmm3
; X64-NEXT:    pxor %xmm2, %xmm3
; X64-NEXT:    paddd %xmm1, %xmm0
; X64-NEXT:    pxor %xmm0, %xmm2
; X64-NEXT:    pcmpgtd %xmm2, %xmm3
; X64-NEXT:    por %xmm3, %xmm0
; X64-NEXT:    retq
  %tmp = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i32> %tmp
}