; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-gnux32 | FileCheck %s --check-prefixes=X32
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64

; An i256 add should lower to a carry chain operating directly on the
; destination memory: addl/adcl on i386, addq/adcq on x86-64 and x32.
define void @add(ptr %p, ptr %q) nounwind {
; X86-LABEL: add:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl 28(%ecx), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl 24(%ecx), %eax
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
; X86-NEXT: movl 20(%ecx), %esi
; X86-NEXT: movl 16(%ecx), %edi
; X86-NEXT: movl 12(%ecx), %ebx
; X86-NEXT: movl 8(%ecx), %ebp
; X86-NEXT: movl (%ecx), %edx
; X86-NEXT: movl 4(%ecx), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %edx, (%eax)
; X86-NEXT: adcl %ecx, 4(%eax)
; X86-NEXT: adcl %ebp, 8(%eax)
; X86-NEXT: adcl %ebx, 12(%eax)
; X86-NEXT: adcl %edi, 16(%eax)
; X86-NEXT: adcl %esi, 20(%eax)
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
; X86-NEXT: adcl %ecx, 24(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: adcl %ecx, 28(%eax)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X32-LABEL: add:
; X32: # %bb.0:
; X32-NEXT: movq 24(%esi), %rax
; X32-NEXT: movq 16(%esi), %rcx
; X32-NEXT: movq (%esi), %rdx
; X32-NEXT: movq 8(%esi), %rsi
; X32-NEXT: addq %rdx, (%edi)
; X32-NEXT: adcq %rsi, 8(%edi)
; X32-NEXT: adcq %rcx, 16(%edi)
; X32-NEXT: adcq %rax, 24(%edi)
; X32-NEXT: retq
;
; X64-LABEL: add:
; X64: # %bb.0:
; X64-NEXT: movq 24(%rsi), %rax
; X64-NEXT: movq 16(%rsi), %rcx
; X64-NEXT: movq (%rsi), %rdx
; X64-NEXT: movq 8(%rsi), %rsi
; X64-NEXT: addq %rdx, (%rdi)
; X64-NEXT: adcq %rsi, 8(%rdi)
; X64-NEXT: adcq %rcx, 16(%rdi)
; X64-NEXT: adcq %rax, 24(%rdi)
; X64-NEXT: retq
  %a = load i256, ptr %p
  %b = load i256, ptr %q
  %c = add i256 %a, %b
  store i256 %c, ptr %p
  ret void
}
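
; An i256 sub mirrors the add case with a borrow chain: subl/sbbl on i386,
; subq/sbbq on x86-64 and x32.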
define void @sub(ptr %p, ptr %q) nounwind {
; X86-LABEL: sub:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl 28(%ecx), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl 24(%ecx), %eax
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
; X86-NEXT: movl 20(%ecx), %esi
; X86-NEXT: movl 16(%ecx), %edi
; X86-NEXT: movl 12(%ecx), %ebx
; X86-NEXT: movl 8(%ecx), %ebp
; X86-NEXT: movl (%ecx), %edx
; X86-NEXT: movl 4(%ecx), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: subl %edx, (%eax)
; X86-NEXT: sbbl %ecx, 4(%eax)
; X86-NEXT: sbbl %ebp, 8(%eax)
; X86-NEXT: sbbl %ebx, 12(%eax)
; X86-NEXT: sbbl %edi, 16(%eax)
; X86-NEXT: sbbl %esi, 20(%eax)
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
; X86-NEXT: sbbl %ecx, 24(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sbbl %ecx, 28(%eax)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X32-LABEL: sub:
; X32: # %bb.0:
; X32-NEXT: movq 24(%esi), %rax
; X32-NEXT: movq 16(%esi), %rcx
; X32-NEXT: movq (%esi), %rdx
; X32-NEXT: movq 8(%esi), %rsi
; X32-NEXT: subq %rdx, (%edi)
; X32-NEXT: sbbq %rsi, 8(%edi)
; X32-NEXT: sbbq %rcx, 16(%edi)
; X32-NEXT: sbbq %rax, 24(%edi)
; X32-NEXT: retq
;
; X64-LABEL: sub:
; X64: # %bb.0:
; X64-NEXT: movq 24(%rsi), %rax
; X64-NEXT: movq 16(%rsi), %rcx
; X64-NEXT: movq (%rsi), %rdx
; X64-NEXT: movq 8(%rsi), %rsi
; X64-NEXT: subq %rdx, (%rdi)
; X64-NEXT: sbbq %rsi, 8(%rdi)
; X64-NEXT: sbbq %rcx, 16(%rdi)
; X64-NEXT: sbbq %rax, 24(%rdi)
; X64-NEXT: retq
  %a = load i256, ptr %p
  %b = load i256, ptr %q
  %c = sub i256 %a, %b
  store i256 %c, ptr %p
  ret void
}