; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X86-SSE
; RUN: llc < %s -mtriple=i686-linux -mattr=+avx    | FileCheck %s --check-prefix=X86-AVX
; RUN: llc < %s -mtriple=i686-linux -mattr=+avx2   | FileCheck %s --check-prefix=X86-AVX
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx    | FileCheck %s --check-prefix=X64-AVX
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx2   | FileCheck %s --check-prefix=X64-AVX

; PR27708

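; The <7 x i64> result is returned through a hidden sret pointer; check that
; exactly 56 bytes are stored, with the odd seventh element written by scalar
; or partial-vector moves rather than a full 16-byte store.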
define <7 x i64> @load7_aligned(ptr %x) nounwind {
; X86-SSE-LABEL: load7_aligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT:    movaps (%ecx), %xmm0
; X86-SSE-NEXT:    movaps 16(%ecx), %xmm1
; X86-SSE-NEXT:    movaps 32(%ecx), %xmm2
; X86-SSE-NEXT:    movl 48(%ecx), %edx
; X86-SSE-NEXT:    movl 52(%ecx), %ecx
; X86-SSE-NEXT:    movl %ecx, 52(%eax)
; X86-SSE-NEXT:    movl %edx, 48(%eax)
; X86-SSE-NEXT:    movaps %xmm2, 32(%eax)
; X86-SSE-NEXT:    movaps %xmm1, 16(%eax)
; X86-SSE-NEXT:    movaps %xmm0, (%eax)
; X86-SSE-NEXT:    retl $4
;
; X86-AVX-LABEL: load7_aligned:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT:    vmovaps (%ecx), %ymm0
; X86-AVX-NEXT:    vmovaps 48(%ecx), %xmm1
; X86-AVX-NEXT:    vextractps $1, %xmm1, 52(%eax)
; X86-AVX-NEXT:    vmovss %xmm1, 48(%eax)
; X86-AVX-NEXT:    vmovaps 32(%ecx), %xmm1
; X86-AVX-NEXT:    vmovaps %xmm1, 32(%eax)
; X86-AVX-NEXT:    vmovaps %ymm0, (%eax)
; X86-AVX-NEXT:    vzeroupper
; X86-AVX-NEXT:    retl $4
;
; X64-SSE-LABEL: load7_aligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movq %rdi, %rax
; X64-SSE-NEXT:    movaps (%rsi), %xmm0
; X64-SSE-NEXT:    movaps 16(%rsi), %xmm1
; X64-SSE-NEXT:    movaps 32(%rsi), %xmm2
; X64-SSE-NEXT:    movq 48(%rsi), %rcx
; X64-SSE-NEXT:    movq %rcx, 48(%rdi)
; X64-SSE-NEXT:    movaps %xmm2, 32(%rdi)
; X64-SSE-NEXT:    movaps %xmm1, 16(%rdi)
; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: load7_aligned:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    movq %rdi, %rax
; X64-AVX-NEXT:    vmovaps (%rsi), %ymm0
; X64-AVX-NEXT:    movq 48(%rsi), %rcx
; X64-AVX-NEXT:    movq %rcx, 48(%rdi)
; X64-AVX-NEXT:    vmovaps 32(%rsi), %xmm1
; X64-AVX-NEXT:    vmovaps %xmm1, 32(%rdi)
; X64-AVX-NEXT:    vmovaps %ymm0, (%rdi)
; X64-AVX-NEXT:    vzeroupper
; X64-AVX-NEXT:    retq
  %x1 = load <7 x i64>, ptr %x
  ret <7 x i64> %x1
}

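; Same as above, but the source has no alignment guarantee, so the loads must
; use unaligned instructions and the tail must be loaded with scalar moves
; rather than a 16-byte load that could read past the end of the object.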
define <7 x i64> @load7_unaligned(ptr %x) nounwind {
; X86-SSE-LABEL: load7_unaligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT:    movups (%ecx), %xmm0
; X86-SSE-NEXT:    movups 16(%ecx), %xmm1
; X86-SSE-NEXT:    movups 32(%ecx), %xmm2
; X86-SSE-NEXT:    movl 48(%ecx), %edx
; X86-SSE-NEXT:    movl 52(%ecx), %ecx
; X86-SSE-NEXT:    movl %ecx, 52(%eax)
; X86-SSE-NEXT:    movl %edx, 48(%eax)
; X86-SSE-NEXT:    movaps %xmm2, 32(%eax)
; X86-SSE-NEXT:    movaps %xmm1, 16(%eax)
; X86-SSE-NEXT:    movaps %xmm0, (%eax)
; X86-SSE-NEXT:    retl $4
;
; X86-AVX-LABEL: load7_unaligned:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT:    vmovups (%ecx), %ymm0
; X86-AVX-NEXT:    vmovups 32(%ecx), %xmm1
; X86-AVX-NEXT:    movl 48(%ecx), %edx
; X86-AVX-NEXT:    movl 52(%ecx), %ecx
; X86-AVX-NEXT:    movl %ecx, 52(%eax)
; X86-AVX-NEXT:    movl %edx, 48(%eax)
; X86-AVX-NEXT:    vmovaps %xmm1, 32(%eax)
; X86-AVX-NEXT:    vmovaps %ymm0, (%eax)
; X86-AVX-NEXT:    vzeroupper
; X86-AVX-NEXT:    retl $4
;
; X64-SSE-LABEL: load7_unaligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movq %rdi, %rax
; X64-SSE-NEXT:    movups (%rsi), %xmm0
; X64-SSE-NEXT:    movups 16(%rsi), %xmm1
; X64-SSE-NEXT:    movups 32(%rsi), %xmm2
; X64-SSE-NEXT:    movq 48(%rsi), %rcx
; X64-SSE-NEXT:    movq %rcx, 48(%rdi)
; X64-SSE-NEXT:    movaps %xmm2, 32(%rdi)
; X64-SSE-NEXT:    movaps %xmm1, 16(%rdi)
; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: load7_unaligned:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    movq %rdi, %rax
; X64-AVX-NEXT:    vmovups (%rsi), %ymm0
; X64-AVX-NEXT:    vmovups 32(%rsi), %xmm1
; X64-AVX-NEXT:    movq 48(%rsi), %rcx
; X64-AVX-NEXT:    movq %rcx, 48(%rdi)
; X64-AVX-NEXT:    vmovaps %xmm1, 32(%rdi)
; X64-AVX-NEXT:    vmovaps %ymm0, (%rdi)
; X64-AVX-NEXT:    vzeroupper
; X64-AVX-NEXT:    retq
  %x1 = load <7 x i64>, ptr %x, align 1
  ret <7 x i64> %x1
}

; PR42305 - https://bugs.llvm.org/show_bug.cgi?id=42305

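; An underaligned <8 x float> load whose two <4 x float> halves are stored to
; separate destinations: with AVX this should remain a single 32-byte vmovups
; whose low half is stored directly and whose high half is written with
; vextractf128; with SSE it splits into two 16-byte movups.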
define void @load_split(ptr %ld, ptr %st1, ptr %st2) nounwind {
; X86-SSE-LABEL: load_split:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT:    movups (%edx), %xmm0
; X86-SSE-NEXT:    movups 16(%edx), %xmm1
; X86-SSE-NEXT:    movups %xmm0, (%ecx)
; X86-SSE-NEXT:    movups %xmm1, (%eax)
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: load_split:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT:    vmovups (%edx), %ymm0
; X86-AVX-NEXT:    vmovups %xmm0, (%ecx)
; X86-AVX-NEXT:    vextractf128 $1, %ymm0, (%eax)
; X86-AVX-NEXT:    vzeroupper
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: load_split:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups (%rdi), %xmm0
; X64-SSE-NEXT:    movups 16(%rdi), %xmm1
; X64-SSE-NEXT:    movups %xmm0, (%rsi)
; X64-SSE-NEXT:    movups %xmm1, (%rdx)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: load_split:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovups (%rdi), %ymm0
; X64-AVX-NEXT:    vmovups %xmm0, (%rsi)
; X64-AVX-NEXT:    vextractf128 $1, %ymm0, (%rdx)
; X64-AVX-NEXT:    vzeroupper
; X64-AVX-NEXT:    retq
  %t256 = load <8 x float>, ptr %ld, align 1
  %b128 = shufflevector <8 x float> %t256, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  store <4 x float> %b128, ptr %st1, align 1
  %t128 = shufflevector <8 x float> %t256, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  store <4 x float> %t128, ptr %st2, align 1
  ret void
}

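; Same split-store pattern as load_split, but the destination addresses come
; from runtime indices loaded from %idx, so the halves are stored through
; scaled-index addressing modes.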
define void @load_split_more(ptr %src, ptr %idx, ptr %dst) nounwind {
; X86-SSE-LABEL: load_split_more:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT:    movups (%edx), %xmm0
; X86-SSE-NEXT:    movups 16(%edx), %xmm1
; X86-SSE-NEXT:    movl (%ecx), %edx
; X86-SSE-NEXT:    movups %xmm0, (%eax,%edx,4)
; X86-SSE-NEXT:    movl 4(%ecx), %ecx
; X86-SSE-NEXT:    movups %xmm1, (%eax,%ecx,4)
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: load_split_more:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT:    vmovups (%edx), %ymm0
; X86-AVX-NEXT:    movl (%ecx), %edx
; X86-AVX-NEXT:    vmovups %xmm0, (%eax,%edx,4)
; X86-AVX-NEXT:    movl 4(%ecx), %ecx
; X86-AVX-NEXT:    vextractf128 $1, %ymm0, (%eax,%ecx,4)
; X86-AVX-NEXT:    vzeroupper
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: load_split_more:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups (%rdi), %xmm0
; X64-SSE-NEXT:    movups 16(%rdi), %xmm1
; X64-SSE-NEXT:    movslq (%rsi), %rax
; X64-SSE-NEXT:    movups %xmm0, (%rdx,%rax,4)
; X64-SSE-NEXT:    movslq 4(%rsi), %rax
; X64-SSE-NEXT:    movups %xmm1, (%rdx,%rax,4)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: load_split_more:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovups (%rdi), %ymm0
; X64-AVX-NEXT:    movslq (%rsi), %rax
; X64-AVX-NEXT:    vmovups %xmm0, (%rdx,%rax,4)
; X64-AVX-NEXT:    movslq 4(%rsi), %rax
; X64-AVX-NEXT:    vextractf128 $1, %ymm0, (%rdx,%rax,4)
; X64-AVX-NEXT:    vzeroupper
; X64-AVX-NEXT:    retq
  %tmp = load <8 x float>, ptr %src, align 1
  %tmp1 = load i32, ptr %idx, align 4
  %idx.ext = sext i32 %tmp1 to i64
  %add.ptr1 = getelementptr inbounds float, ptr %dst, i64 %idx.ext
  %extract = shufflevector <8 x float> %tmp, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  store <4 x float> %extract, ptr %add.ptr1, align 1
  %arrayidx2 = getelementptr inbounds i32, ptr %idx, i64 1
  %tmp2 = load i32, ptr %arrayidx2, align 4
  %idx.ext3 = sext i32 %tmp2 to i64
  %add.ptr4 = getelementptr inbounds float, ptr %dst, i64 %idx.ext3
  %extract5 = shufflevector <8 x float> %tmp, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  store <4 x float> %extract5, ptr %add.ptr4, align 1
  ret void
}