; xref: /minix3/external/bsd/llvm/dist/llvm/test/CodeGen/X86/vector-trunc.ll (revision 0a6a1f1d05b60e214de2f05a7310ddd1f0e590e7)
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; Truncate <2 x i64> -> <2 x i32>, then reinterpret the packed result as a
; scalar i64. Checks that codegen selects a single shuffle (pshufd/vpshufd)
; followed by a GPR move instead of scalarizing the truncation.
define i64 @trunc2i64(<2 x i64> %inval) {
; SSE-LABEL:  trunc2i64:
; SSE:        # BB#0: # %entry
; SSE-NEXT:   pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT:   movd %xmm0, %rax
; SSE-NEXT:   retq

; AVX-LABEL:  trunc2i64:
; AVX:        # BB#0: # %entry
; AVX-NEXT:   vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT:   vmovq %xmm0, %rax
; AVX-NEXT:   retq

entry:
  %0 = trunc <2 x i64> %inval to <2 x i32>
  %1 = bitcast <2 x i32> %0 to i64
  ret i64 %1
}

; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
; Truncate <4 x i32> -> <4 x i16>, reinterpreted as i64 (PR15524).
; SSE2 has no byte shuffle, so it needs a pshuflw/pshufhw/pshufd sequence;
; SSSE3 and later collapse the truncation into one pshufb.
define i64 @trunc4i32(<4 x i32> %inval) {
; SSE2-LABEL:  trunc4i32:
; SSE2:        # BB#0: # %entry
; SSE2-NEXT:   pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE2-NEXT:   pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT:   pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:   movd %xmm0, %rax
; SSE2-NEXT:   retq

; SSSE3-LABEL: trunc4i32:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:  pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSSE3-NEXT:  movd %xmm0, %rax
; SSSE3-NEXT:  retq

; SSE41-LABEL: trunc4i32:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:  pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE41-NEXT:  movd %xmm0, %rax
; SSE41-NEXT:  retq

; AVX-LABEL:  trunc4i32:
; AVX:        # BB#0: # %entry
; AVX-NEXT:   vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX-NEXT:   vmovq %xmm0, %rax
; AVX-NEXT:   retq

entry:
  %0 = trunc <4 x i32> %inval to <4 x i16>
  %1 = bitcast <4 x i16> %0 to i64
  ret i64 %1
}

; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
; Truncate <8 x i16> -> <8 x i8>, reinterpreted as i64 (PR15524).
; SSE2 masks the high byte of each word then packs (pand + packuswb);
; SSSE3 and later use a single pshufb gathering the even bytes.
define i64 @trunc8i16(<8 x i16> %inval) {
; SSE2-LABEL:  trunc8i16:
; SSE2:        # BB#0: # %entry
; SSE2-NEXT:   pand .LCP{{.*}}(%rip), %xmm0
; SSE2-NEXT:   packuswb %xmm0, %xmm0
; SSE2-NEXT:   movd %xmm0, %rax
; SSE2-NEXT:   retq

; SSSE3-LABEL: trunc8i16:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:  pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSSE3-NEXT:  movd %xmm0, %rax
; SSSE3-NEXT:  retq

; SSE41-LABEL: trunc8i16:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:  pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE41-NEXT:  movd %xmm0, %rax
; SSE41-NEXT:  retq

; AVX-LABEL:  trunc8i16:
; AVX:        # BB#0: # %entry
; AVX-NEXT:   vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX-NEXT:   vmovq %xmm0, %rax
; AVX-NEXT:   retq

entry:
  %0 = trunc <8 x i16> %inval to <8 x i8>
  %1 = bitcast <8 x i8> %0 to i64
  ret i64 %1
}