; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=sse4.2 | FileCheck %s --check-prefix=FAST
; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=ssse3 | FileCheck %s --check-prefix=SLOW_32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=ssse3 | FileCheck %s --check-prefix=SLOW_64

; An 80-byte, 4-byte-aligned memset to zero. With sse4.2 (fast unaligned
; stores) it lowers to five 16-byte movups stores; with only ssse3 it falls
; back to 8-byte stores (movsd on i386, movq-immediate on x86-64).
define void @bork(ptr nocapture align 4 %dst) nounwind {
; FAST-LABEL: bork:
; FAST:       # %bb.0:
; FAST-NEXT:    movl {{[0-9]+}}(%esp), %eax
; FAST-NEXT:    xorps %xmm0, %xmm0
; FAST-NEXT:    movups %xmm0, 64(%eax)
; FAST-NEXT:    movups %xmm0, 48(%eax)
; FAST-NEXT:    movups %xmm0, 32(%eax)
; FAST-NEXT:    movups %xmm0, 16(%eax)
; FAST-NEXT:    movups %xmm0, (%eax)
; FAST-NEXT:    retl
;
; SLOW_32-LABEL: bork:
; SLOW_32:       # %bb.0:
; SLOW_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; SLOW_32-NEXT:    xorps %xmm0, %xmm0
; SLOW_32-NEXT:    movsd %xmm0, 72(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 64(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 56(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 48(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 40(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 32(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 24(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 16(%eax)
; SLOW_32-NEXT:    movsd %xmm0, 8(%eax)
; SLOW_32-NEXT:    movsd %xmm0, (%eax)
; SLOW_32-NEXT:    retl
;
; SLOW_64-LABEL: bork:
; SLOW_64:       # %bb.0:
; SLOW_64-NEXT:    movq $0, 72(%rdi)
; SLOW_64-NEXT:    movq $0, 64(%rdi)
; SLOW_64-NEXT:    movq $0, 56(%rdi)
; SLOW_64-NEXT:    movq $0, 48(%rdi)
; SLOW_64-NEXT:    movq $0, 40(%rdi)
; SLOW_64-NEXT:    movq $0, 32(%rdi)
; SLOW_64-NEXT:    movq $0, 24(%rdi)
; SLOW_64-NEXT:    movq $0, 16(%rdi)
; SLOW_64-NEXT:    movq $0, 8(%rdi)
; SLOW_64-NEXT:    movq $0, (%rdi)
; SLOW_64-NEXT:    retq
  call void @llvm.memset.p0.i64(ptr align 4 %dst, i8 0, i64 80, i1 false)
  ret void
}

declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind