; xref: /llvm-project/llvm/test/CodeGen/X86/maskmovdqu.ll (revision 2f448bf509432c1a19ec46ab8cbc7353c03c6280)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686--    -mattr=+sse2,-avx | FileCheck %s --check-prefix=i686_SSE2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2,-avx | FileCheck %s --check-prefix=x86_64_SSE2
; RUN: llc < %s -mtriple=x86_64--gnux32 -mattr=+sse2,-avx | FileCheck %s --check-prefix=x86_x32_SSE2
; RUN: llc < %s -mtriple=i686--    -mattr=+avx | FileCheck %s --check-prefix=i686_AVX
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefix=x86_64_AVX
; RUN: llc < %s -mtriple=x86_64--gnux32 -mattr=+avx | FileCheck %s --check-prefix=x86_x32_AVX
; rdar://6573467

define void @test(<16 x i8> %a, <16 x i8> %b, i32 %dummy, ptr %c) nounwind {
; i686_SSE2-LABEL: test:
; i686_SSE2:       # %bb.0: # %entry
; i686_SSE2-NEXT:    pushl %edi
; i686_SSE2-NEXT:    movl {{[0-9]+}}(%esp), %edi
; i686_SSE2-NEXT:    maskmovdqu %xmm1, %xmm0
; i686_SSE2-NEXT:    popl %edi
; i686_SSE2-NEXT:    retl
;
; x86_64_SSE2-LABEL: test:
; x86_64_SSE2:       # %bb.0: # %entry
; x86_64_SSE2-NEXT:    movq %rsi, %rdi
; x86_64_SSE2-NEXT:    maskmovdqu %xmm1, %xmm0
; x86_64_SSE2-NEXT:    retq
;
; x86_x32_SSE2-LABEL: test:
; x86_x32_SSE2:       # %bb.0: # %entry
; x86_x32_SSE2-NEXT:    movq %rsi, %rdi
; x86_x32_SSE2-NEXT:    # kill: def $edi killed $edi killed $rdi
; x86_x32_SSE2-NEXT:    addr32 maskmovdqu %xmm1, %xmm0
; x86_x32_SSE2-NEXT:    retq
;
; i686_AVX-LABEL: test:
; i686_AVX:       # %bb.0: # %entry
; i686_AVX-NEXT:    pushl %edi
; i686_AVX-NEXT:    movl {{[0-9]+}}(%esp), %edi
; i686_AVX-NEXT:    vmaskmovdqu %xmm1, %xmm0
; i686_AVX-NEXT:    popl %edi
; i686_AVX-NEXT:    retl
;
; x86_64_AVX-LABEL: test:
; x86_64_AVX:       # %bb.0: # %entry
; x86_64_AVX-NEXT:    movq %rsi, %rdi
; x86_64_AVX-NEXT:    vmaskmovdqu %xmm1, %xmm0
; x86_64_AVX-NEXT:    retq
;
; x86_x32_AVX-LABEL: test:
; x86_x32_AVX:       # %bb.0: # %entry
; x86_x32_AVX-NEXT:    movq %rsi, %rdi
; x86_x32_AVX-NEXT:    # kill: def $edi killed $edi killed $rdi
; x86_x32_AVX-NEXT:    addr32 vmaskmovdqu %xmm1, %xmm0
; x86_x32_AVX-NEXT:    retq
;
; Check that the destination pointer %c lands in (E/R)DI, the implicit
; address register of MASKMOVDQU, on all six target/feature combinations.
entry:
	tail call void @llvm.x86.sse2.maskmov.dqu( <16 x i8> %a, <16 x i8> %b, ptr %c )
	ret void
}

; External declaration of the SSE2 masked-store intrinsic exercised above.
declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, ptr) nounwind