; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s --check-prefixes=X64

;
; fixed avg(x,y) = add(and(x,y),lshr(xor(x,y),1))
;
; ext avg(x,y) = trunc(lshr(add(zext(x),zext(y)),1))
;

define i8 @test_fixed_i8(i8 %a0, i8 %a1) nounwind {
; X86-LABEL: test_fixed_i8:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl %ecx, %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $al killed $al killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: test_fixed_i8:
; X64:       # %bb.0:
; X64-NEXT:    movzbl %sil, %ecx
; X64-NEXT:    movzbl %dil, %eax
; X64-NEXT:    addl %ecx, %eax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %and = and i8 %a0, %a1
  %xor = xor i8 %a0, %a1
  %shift = lshr i8 %xor, 1
  %res = add i8 %and, %shift
  ret i8 %res
}

define i8 @test_ext_i8(i8 %a0, i8 %a1) nounwind {
; X86-LABEL: test_ext_i8:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl %ecx, %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $al killed $al killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: test_ext_i8:
; X64:       # %bb.0:
; X64-NEXT:    movzbl %sil, %ecx
; X64-NEXT:    movzbl %dil, %eax
; X64-NEXT:    addl %ecx, %eax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %x0 = zext i8 %a0 to i16
  %x1 = zext i8 %a1 to i16
  %sum = add i16 %x0, %x1
  %shift = lshr i16 %sum, 1
  %res = trunc i16 %shift to i8
  ret i8 %res
}

define i16 @test_fixed_i16(i16 %a0, i16 %a1) nounwind {
; X86-LABEL: test_fixed_i16:
; X86:       # %bb.0:
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl %ecx, %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: test_fixed_i16:
; X64:       # %bb.0:
; X64-NEXT:    movzwl %si, %ecx
; X64-NEXT:    movzwl %di, %eax
; X64-NEXT:    addl %ecx, %eax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
  %and = and i16 %a0, %a1
  %xor = xor i16 %a0, %a1
  %shift = lshr i16 %xor, 1
  %res = add i16 %and, %shift
  ret i16 %res
}

define i16 @test_ext_i16(i16 %a0, i16 %a1) nounwind {
; X86-LABEL: test_ext_i16:
; X86:       # %bb.0:
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl %ecx, %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: test_ext_i16:
; X64:       # %bb.0:
; X64-NEXT:    movzwl %si, %ecx
; X64-NEXT:    movzwl %di, %eax
; X64-NEXT:    addl %ecx, %eax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
  %x0 = zext i16 %a0 to i32
  %x1 = zext i16 %a1 to i32
  %sum = add i32 %x0, %x1
  %shift = lshr i32 %sum, 1
  %res = trunc i32 %shift to i16
  ret i16 %res
}

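; For the wider types below, the zero-extended sum no longer fits in a native
; register on every target: x86-64 still widens i32 (addq+shrq on 64-bit
; registers), but i32 on i686 and i64 on x86-64 use the overflow-free
; and+xor+shift expansion, and i64 on i686 halves the 65-bit add/adc sum with
; funnel shifts.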
define i32 @test_fixed_i32(i32 %a0, i32 %a1) nounwind {
; X86-LABEL: test_fixed_i32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    andl %ecx, %edx
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    addl %edx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: test_fixed_i32:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %ecx
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    addq %rcx, %rax
; X64-NEXT:    shrq %rax
; X64-NEXT:    # kill: def $eax killed $eax killed $rax
; X64-NEXT:    retq
  %and = and i32 %a0, %a1
  %xor = xor i32 %a1, %a0
  %shift = lshr i32 %xor, 1
  %res = add i32 %and, %shift
  ret i32 %res
}

define i32 @test_ext_i32(i32 %a0, i32 %a1) nounwind {
; X86-LABEL: test_ext_i32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    andl %ecx, %edx
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    addl %edx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: test_ext_i32:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %ecx
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    addq %rcx, %rax
; X64-NEXT:    shrq %rax
; X64-NEXT:    # kill: def $eax killed $eax killed $rax
; X64-NEXT:    retq
  %x0 = zext i32 %a0 to i64
  %x1 = zext i32 %a1 to i64
  %sum = add i64 %x0, %x1
  %shift = lshr i64 %sum, 1
  %res = trunc i64 %shift to i32
  ret i32 %res
}

define i64 @test_fixed_i64(i64 %a0, i64 %a1) nounwind {
; X86-LABEL: test_fixed_i64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    adcl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    setb %dl
; X86-NEXT:    movzbl %dl, %edx
; X86-NEXT:    shldl $31, %eax, %edx
; X86-NEXT:    shldl $31, %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: test_fixed_i64:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    andq %rsi, %rax
; X64-NEXT:    xorq %rsi, %rdi
; X64-NEXT:    shrq %rdi
; X64-NEXT:    addq %rdi, %rax
; X64-NEXT:    retq
  %and = and i64 %a0, %a1
  %xor = xor i64 %a1, %a0
  %shift = lshr i64 %xor, 1
  %res = add i64 %and, %shift
  ret i64 %res
}

define i64 @test_ext_i64(i64 %a0, i64 %a1) nounwind {
; X86-LABEL: test_ext_i64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    adcl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    setb %dl
; X86-NEXT:    movzbl %dl, %edx
; X86-NEXT:    shldl $31, %eax, %edx
; X86-NEXT:    shldl $31, %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: test_ext_i64:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    andq %rsi, %rax
; X64-NEXT:    xorq %rsi, %rdi
; X64-NEXT:    shrq %rdi
; X64-NEXT:    addq %rdi, %rax
; X64-NEXT:    retq
  %x0 = zext i64 %a0 to i128
  %x1 = zext i64 %a1 to i128
  %sum = add i128 %x0, %x1
  %shift = lshr i128 %sum, 1
  %res = trunc i128 %shift to i64
  ret i64 %res
}
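
; For reference, a minimal C sketch of the two formulations exercised above
; (hypothetical helper names, assuming <stdint.h>; both compute the
; overflow-free floor average floor((x + y) / 2)):
;
;   uint8_t avg_fixed_u8(uint8_t x, uint8_t y) {
;     return (x & y) + ((x ^ y) >> 1);                    // never leaves 8 bits
;   }
;   uint8_t avg_ext_u8(uint8_t x, uint8_t y) {
;     return (uint8_t)(((uint16_t)x + (uint16_t)y) >> 1); // widen, add, halve
;   }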