; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s

; Test that vector arithmetic shifts are converted to the proper SSE2
; packed shift instructions when all the shift amounts are the same.

; Note that x86 does have a scalar arithmetic shift right (sar).

; shift1a can't use a packed shift: SSE2 has no packed arithmetic shift
; for 64-bit elements, so the v2i64 ashr is lowered to scalar sarl.
define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
entry:
; CHECK-LABEL: shift1a:
; CHECK: sarl
  %ashr = ashr <2 x i64> %val, < i64 32, i64 32 >
  store <2 x i64> %ashr, <2 x i64>* %dst
  ret void
}

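; shift2a shifts every i32 lane by the same constant, so it lowers to a
; single psrad with an immediate.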
define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
entry:
; CHECK-LABEL: shift2a:
; CHECK: psrad	$5
  %ashr = ashr <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
  store <4 x i32> %ashr, <4 x i32>* %dst
  ret void
}

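; shift2b splats a variable i32 amount across the vector; the amount is
; moved into an XMM register (movd) and psrad shifts by it.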
define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
entry:
; CHECK-LABEL: shift2b:
; CHECK: movd
; CHECK: psrad
  %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
  %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
  %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
  %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
  %ashr = ashr <4 x i32> %val, %3
  store <4 x i32> %ashr, <4 x i32>* %dst
  ret void
}

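; shift3a shifts every i16 lane by the same constant, so it lowers to
; psraw with an immediate.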
define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
entry:
; CHECK-LABEL: shift3a:
; CHECK: psraw	$5
  %ashr = ashr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
  store <8 x i16> %ashr, <8 x i16>* %dst
  ret void
}

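; shift3b splats a variable i16 amount; it is zero-extended (movzwl),
; moved into an XMM register (movd), and used by psraw.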
define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
entry:
; CHECK-LABEL: shift3b:
; CHECK: movzwl
; CHECK: movd
; CHECK: psraw
  %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
  %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
  %2 = insertelement <8 x i16> %1, i16 %amt, i32 2
  %3 = insertelement <8 x i16> %2, i16 %amt, i32 3
  %4 = insertelement <8 x i16> %3, i16 %amt, i32 4
  %5 = insertelement <8 x i16> %4, i16 %amt, i32 5
  %6 = insertelement <8 x i16> %5, i16 %amt, i32 6
  %7 = insertelement <8 x i16> %6, i16 %amt, i32 7
  %ashr = ashr <8 x i16> %val, %7
  store <8 x i16> %ashr, <8 x i16>* %dst
  ret void
}