xref: /llvm-project/llvm/test/CodeGen/X86/pr15296.ll (revision cf5c63d187f4e8e2b1855c2c37fbc79d47852ec8)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=i686-pc-linux -mcpu=corei7-avx | FileCheck %s --check-prefixes=X86
3; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=corei7-avx | FileCheck %s --check-prefixes=X64
4
; Regression test for PR15296: a variable logical right shift of a <8 x i32>
; where the shift amount is splatted one lane at a time through a chain of
; insertelement instructions (no canonical shufflevector splat).
; corei7-avx (AVX1) has no 256-bit integer shifts, so codegen must split the
; ymm into two xmm halves, shift each half with vpsrld using the scalar
; amount in an xmm register, and reassemble with vinsertf128.
; On X86 the i32 amount is loaded from the stack with vmovd; on X64 it
; arrives in %edi and is moved into an xmm with vmovd.
5define <8 x i32> @shiftInput___vyuunu(<8 x i32> %input, i32 %shiftval, <8 x i32> %__mask) nounwind {
6; X86-LABEL: shiftInput___vyuunu:
7; X86:       # %bb.0: # %allocas
8; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
9; X86-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
10; X86-NEXT:    vpsrld %xmm2, %xmm1, %xmm1
11; X86-NEXT:    vpsrld %xmm2, %xmm0, %xmm0
12; X86-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
13; X86-NEXT:    retl
14;
15; X64-LABEL: shiftInput___vyuunu:
16; X64:       # %bb.0: # %allocas
17; X64-NEXT:    vmovd %edi, %xmm1
18; X64-NEXT:    vextractf128 $1, %ymm0, %xmm2
19; X64-NEXT:    vpsrld %xmm1, %xmm2, %xmm2
20; X64-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
21; X64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
22; X64-NEXT:    retq
23allocas:
24  %smear.0 = insertelement <8 x i32> undef, i32 %shiftval, i32 0
25  %smear.1 = insertelement <8 x i32> %smear.0, i32 %shiftval, i32 1
26  %smear.2 = insertelement <8 x i32> %smear.1, i32 %shiftval, i32 2
27  %smear.3 = insertelement <8 x i32> %smear.2, i32 %shiftval, i32 3
28  %smear.4 = insertelement <8 x i32> %smear.3, i32 %shiftval, i32 4
29  %smear.5 = insertelement <8 x i32> %smear.4, i32 %shiftval, i32 5
30  %smear.6 = insertelement <8 x i32> %smear.5, i32 %shiftval, i32 6
31  %smear.7 = insertelement <8 x i32> %smear.6, i32 %shiftval, i32 7
32  %bitop = lshr <8 x i32> %input, %smear.7
33  ret <8 x i32> %bitop
34}
35
; Same splatted-shift pattern as shiftInput___vyuunu, but the splat is
; expressed in canonical form: a single insertelement into lane 0 followed
; by a shufflevector with a zeroinitializer mask. Codegen is expected to be
; identical to the insertelement-chain version above (split the ymm,
; vpsrld each half by the scalar amount, vinsertf128 to recombine).
36define <8 x i32> @shiftInput___canonical(<8 x i32> %input, i32 %shiftval, <8 x i32> %__mask) nounwind {
37; X86-LABEL: shiftInput___canonical:
38; X86:       # %bb.0: # %allocas
39; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
40; X86-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
41; X86-NEXT:    vpsrld %xmm2, %xmm1, %xmm1
42; X86-NEXT:    vpsrld %xmm2, %xmm0, %xmm0
43; X86-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
44; X86-NEXT:    retl
45;
46; X64-LABEL: shiftInput___canonical:
47; X64:       # %bb.0: # %allocas
48; X64-NEXT:    vmovd %edi, %xmm1
49; X64-NEXT:    vextractf128 $1, %ymm0, %xmm2
50; X64-NEXT:    vpsrld %xmm1, %xmm2, %xmm2
51; X64-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
52; X64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
53; X64-NEXT:    retq
54allocas:
55  %smear.0 = insertelement <8 x i32> undef, i32 %shiftval, i32 0
56  %smear.7 = shufflevector <8 x i32> %smear.0, <8 x i32> undef, <8 x i32> zeroinitializer
57  %bitop = lshr <8 x i32> %input, %smear.7
58  ret <8 x i32> %bitop
59}
60
; Splatted <4 x i64> logical right shift with an i64 shift amount. On the
; 32-bit target the i64 argument cannot live in a single GPR, so it is
; loaded from memory as one 64-bit value with vmovq; on X64 it arrives in
; %rdi and is moved to an xmm with vmovq. The ymm is again split into two
; xmm halves and shifted with vpsrlq per half.
61define <4 x i64> @shiftInput___64in32bitmode(<4 x i64> %input, i64 %shiftval) nounwind {
62; X86-LABEL: shiftInput___64in32bitmode:
63; X86:       # %bb.0: # %allocas
64; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
65; X86-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
66; X86-NEXT:    vpsrlq %xmm2, %xmm1, %xmm1
67; X86-NEXT:    vpsrlq %xmm2, %xmm0, %xmm0
68; X86-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
69; X86-NEXT:    retl
70;
71; X64-LABEL: shiftInput___64in32bitmode:
72; X64:       # %bb.0: # %allocas
73; X64-NEXT:    vmovq %rdi, %xmm1
74; X64-NEXT:    vextractf128 $1, %ymm0, %xmm2
75; X64-NEXT:    vpsrlq %xmm1, %xmm2, %xmm2
76; X64-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
77; X64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
78; X64-NEXT:    retq
79allocas:
80  %smear.0 = insertelement <4 x i64> undef, i64 %shiftval, i32 0
81  %smear.7 = shufflevector <4 x i64> %smear.0, <4 x i64> undef, <4 x i32> zeroinitializer
82  %bitop = lshr <4 x i64> %input, %smear.7
83  ret <4 x i64> %bitop
84}
85
; A <4 x i64> shift whose amount is built as a <8 x i32>: %shiftval is
; inserted into the even 32-bit lanes of a zeroinitializer vector (so each
; 64-bit lane's high half is zero) and then bitcast to <4 x i64>. Codegen
; should recognize this as a uniform 64-bit shift amount and use vpsrlq
; with only the 32-bit scalar loaded via vmovd (upper bits known zero),
; splitting the ymm into xmm halves as in the other tests.
86define <4 x i64> @shiftInput___2x32bitcast(<4 x i64> %input, i32 %shiftval) nounwind {
87; X86-LABEL: shiftInput___2x32bitcast:
88; X86:       # %bb.0: # %allocas
89; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
90; X86-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
91; X86-NEXT:    vpsrlq %xmm2, %xmm1, %xmm1
92; X86-NEXT:    vpsrlq %xmm2, %xmm0, %xmm0
93; X86-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
94; X86-NEXT:    retl
95;
96; X64-LABEL: shiftInput___2x32bitcast:
97; X64:       # %bb.0: # %allocas
98; X64-NEXT:    vmovd %edi, %xmm1
99; X64-NEXT:    vextractf128 $1, %ymm0, %xmm2
100; X64-NEXT:    vpsrlq %xmm1, %xmm2, %xmm2
101; X64-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
102; X64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
103; X64-NEXT:    retq
104allocas:
105  %smear.0 = insertelement <8 x i32> zeroinitializer, i32 %shiftval, i32 0
106  %smear.1 = insertelement <8 x i32> %smear.0, i32 %shiftval, i32 2
107  %smear.2 = insertelement <8 x i32> %smear.1, i32 %shiftval, i32 4
108  %smear.3 = insertelement <8 x i32> %smear.2, i32 %shiftval, i32 6
109  %smear.4 = bitcast <8 x i32> %smear.3 to <4 x i64>
110  %bitop = lshr <4 x i64> %input, %smear.4
111  ret <4 x i64> %bitop
112}
113