; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -interleaved-access -S | FileCheck %s
; RUN: opt < %s -passes=interleaved-access -S | FileCheck %s

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnu"

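; vld2: a wide <8 x float> load whose fmul results are deinterleaved with even/odd shuffle
; masks should be rewritten to use @llvm.aarch64.neon.ld2.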
define <4 x float> @vld2(ptr %pSrc) {
; CHECK-LABEL: @vld2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LDN:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr [[PSRC:%.*]])
; CHECK-NEXT:    [[TMP0:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[L26:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP3]]
; CHECK-NEXT:    [[L43:%.*]] = fmul fast <4 x float> [[TMP0]], [[TMP1]]
; CHECK-NEXT:    [[L6:%.*]] = fadd fast <4 x float> [[L43]], [[L26]]
; CHECK-NEXT:    ret <4 x float> [[L6]]
;
entry:
  %wide.vec = load <8 x float>, ptr %pSrc, align 4
  %l2 = fmul fast <8 x float> %wide.vec, %wide.vec
  %l3 = shufflevector <8 x float> %l2, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %l4 = fmul fast <8 x float> %wide.vec, %wide.vec
  %l5 = shufflevector <8 x float> %l4, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %l6 = fadd fast <4 x float> %l5, %l3
  ret <4 x float> %l6
}

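; vld3: the same pattern over a <12 x float> load with stride-3 shuffle masks should be
; rewritten to use @llvm.aarch64.neon.ld3.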
define <4 x float> @vld3(ptr %pSrc) {
; CHECK-LABEL: @vld3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LDN:%.*]] = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0(ptr [[PSRC:%.*]])
; CHECK-NEXT:    [[TMP0:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float> } [[LDN]], 2
; CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float> } [[LDN]], 2
; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[L29:%.*]] = fmul fast <4 x float> [[TMP4]], [[TMP5]]
; CHECK-NEXT:    [[L46:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP3]]
; CHECK-NEXT:    [[L6:%.*]] = fadd fast <4 x float> [[L46]], [[L29]]
; CHECK-NEXT:    [[L73:%.*]] = fmul fast <4 x float> [[TMP0]], [[TMP1]]
; CHECK-NEXT:    [[L9:%.*]] = fadd fast <4 x float> [[L6]], [[L73]]
; CHECK-NEXT:    ret <4 x float> [[L9]]
;
entry:
  %wide.vec = load <12 x float>, ptr %pSrc, align 4
  %l2 = fmul fast <12 x float> %wide.vec, %wide.vec
  %l3 = shufflevector <12 x float> %l2, <12 x float> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
  %l4 = fmul fast <12 x float> %wide.vec, %wide.vec
  %l5 = shufflevector <12 x float> %l4, <12 x float> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
  %l6 = fadd fast <4 x float> %l5, %l3
  %l7 = fmul fast <12 x float> %wide.vec, %wide.vec
  %l8 = shufflevector <12 x float> %l7, <12 x float> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
  %l9 = fadd fast <4 x float> %l6, %l8
  ret <4 x float> %l9
}

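; vld4: a <16 x float> load deinterleaved with stride-4 shuffle masks should be rewritten to
; use @llvm.aarch64.neon.ld4.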
define <4 x float> @vld4(ptr %pSrc) {
; CHECK-LABEL: @vld4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LDN:%.*]] = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0(ptr [[PSRC:%.*]])
; CHECK-NEXT:    [[TMP0:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 3
; CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 3
; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 2
; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[TMP7:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[L312:%.*]] = fmul fast <4 x float> [[TMP6]], [[TMP7]]
; CHECK-NEXT:    [[L59:%.*]] = fmul fast <4 x float> [[TMP4]], [[TMP5]]
; CHECK-NEXT:    [[L7:%.*]] = fadd fast <4 x float> [[L59]], [[L312]]
; CHECK-NEXT:    [[L86:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP3]]
; CHECK-NEXT:    [[L103:%.*]] = fmul fast <4 x float> [[TMP0]], [[TMP1]]
; CHECK-NEXT:    [[L12:%.*]] = fadd fast <4 x float> [[L103]], [[L86]]
; CHECK-NEXT:    ret <4 x float> [[L12]]
;
entry:
  %wide.vec = load <16 x float>, ptr %pSrc, align 4
  %l3 = fmul fast <16 x float> %wide.vec, %wide.vec
  %l4 = shufflevector <16 x float> %l3, <16 x float> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %l5 = fmul fast <16 x float> %wide.vec, %wide.vec
  %l6 = shufflevector <16 x float> %l5, <16 x float> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
  %l7 = fadd fast <4 x float> %l6, %l4
  %l8 = fmul fast <16 x float> %wide.vec, %wide.vec
  %l9 = shufflevector <16 x float> %l8, <16 x float> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
  %l10 = fmul fast <16 x float> %wide.vec, %wide.vec
  %l11 = shufflevector <16 x float> %l10, <16 x float> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
  %l12 = fadd fast <4 x float> %l11, %l9
  ret <4 x float> %l12
}

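; twosrc: both wide loads feed the deinterleaved fmuls, so each load should become its own
; ld2 call and the fmuls should operate on the extracted halves.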
define <4 x float> @twosrc(ptr %pSrc1, ptr %pSrc2) {
; CHECK-LABEL: @twosrc(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LDN:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr [[PSRC1:%.*]])
; CHECK-NEXT:    [[TMP0:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[LDN7:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr [[PSRC2:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN7]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN7]], 1
; CHECK-NEXT:    [[L46:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
; CHECK-NEXT:    [[L63:%.*]] = fmul fast <4 x float> [[TMP3]], [[TMP0]]
; CHECK-NEXT:    [[L8:%.*]] = fadd fast <4 x float> [[L63]], [[L46]]
; CHECK-NEXT:    ret <4 x float> [[L8]]
;
entry:
  %wide.vec = load <8 x float>, ptr %pSrc1, align 4
  %wide.vec26 = load <8 x float>, ptr %pSrc2, align 4
  %l4 = fmul fast <8 x float> %wide.vec26, %wide.vec
  %l5 = shufflevector <8 x float> %l4, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %l6 = fmul fast <8 x float> %wide.vec26, %wide.vec
  %l7 = shufflevector <8 x float> %l6, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %l8 = fadd fast <4 x float> %l7, %l5
  ret <4 x float> %l8
}

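; twosrc2: as @twosrc, but the odd lanes are shuffled out before the fmul rather than after;
; both loads should still be rewritten to ld2.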
define <4 x float> @twosrc2(ptr %pSrc1, ptr %pSrc2) {
; CHECK-LABEL: @twosrc2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LDN:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr [[PSRC1:%.*]])
; CHECK-NEXT:    [[TMP0:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[LDN4:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr [[PSRC2:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN4]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN4]], 1
; CHECK-NEXT:    [[L43:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
; CHECK-NEXT:    [[L6:%.*]] = fmul fast <4 x float> [[TMP3]], [[TMP0]]
; CHECK-NEXT:    [[L8:%.*]] = fadd fast <4 x float> [[L6]], [[L43]]
; CHECK-NEXT:    ret <4 x float> [[L8]]
;
entry:
  %wide.vec = load <8 x float>, ptr %pSrc1, align 4
  %wide.vec26 = load <8 x float>, ptr %pSrc2, align 4
  %l4 = fmul fast <8 x float> %wide.vec26, %wide.vec
  %l5 = shufflevector <8 x float> %l4, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %s1 = shufflevector <8 x float> %wide.vec26, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %s2 = shufflevector <8 x float> %wide.vec, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %l6 = fmul fast <4 x float> %s1, %s2
  %l8 = fadd fast <4 x float> %l6, %l5
  ret <4 x float> %l8
}

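; noncanonical: the deinterleaving shuffle mixes two different sources (a load and an add),
; so the IR is expected to stay unchanged.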
define void @noncanonical(ptr %p0, ptr %p1, ptr %p2) {
; CHECK-LABEL: @noncanonical(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V0:%.*]] = load <8 x i8>, ptr [[P0:%.*]], align 8
; CHECK-NEXT:    [[V1:%.*]] = add <8 x i8> [[V0]], <i8 0, i8 1, i8 2, i8 3, i8 7, i8 7, i8 7, i8 7>
; CHECK-NEXT:    [[V2:%.*]] = load <8 x i8>, ptr [[P1:%.*]], align 8
; CHECK-NEXT:    [[SHUFFLED:%.*]] = shufflevector <8 x i8> [[V2]], <8 x i8> [[V1]], <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT:    store <4 x i8> [[SHUFFLED]], ptr [[P2:%.*]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %v0 = load <8 x i8>, ptr %p0
  %v1 = add <8 x i8> %v0, <i8 0, i8 1, i8 2, i8 3, i8 7, i8 7, i8 7, i8 7>
  %v2 = load <8 x i8>, ptr %p1
  %shuffled = shufflevector <8 x i8> %v2, <8 x i8> %v1, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  store <4 x i8> %shuffled, ptr %p2
  ret void
}

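; noncanonical2: the shuffle mask selects lanes of the undef first operand rather than the
; add result, so the IR is expected to stay unchanged.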
define void @noncanonical2(ptr %p0, ptr %p1, ptr %p2) {
; CHECK-LABEL: @noncanonical2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V0:%.*]] = load <8 x i8>, ptr [[P0:%.*]], align 8
; CHECK-NEXT:    [[V1:%.*]] = load <8 x i8>, ptr [[P1:%.*]], align 8
; CHECK-NEXT:    [[V2:%.*]] = add <8 x i8> [[V0]], [[V1]]
; CHECK-NEXT:    [[SHUFFLED:%.*]] = shufflevector <8 x i8> undef, <8 x i8> [[V2]], <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT:    store <4 x i8> [[SHUFFLED]], ptr [[P2:%.*]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %v0 = load <8 x i8>, ptr %p0
  %v1 = load <8 x i8>, ptr %p1
  %v2 = add <8 x i8> %v0, %v1
  %shuffled = shufflevector <8 x i8> undef, <8 x i8> %v2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  store <4 x i8> %shuffled, ptr %p2
  ret void
}

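; noncanonical3: one deinterleaving shuffle uses undef as its first operand and indexes into
; the second, so the IR is expected to stay unchanged.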
define <4 x float> @noncanonical3(ptr %pSrc) {
; CHECK-LABEL: @noncanonical3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[WIDE_VEC:%.*]] = load <8 x float>, ptr [[PSRC:%.*]], align 4
; CHECK-NEXT:    [[L2:%.*]] = fmul fast <8 x float> [[WIDE_VEC]], [[WIDE_VEC]]
; CHECK-NEXT:    [[L3:%.*]] = shufflevector <8 x float> undef, <8 x float> [[L2]], <4 x i32> <i32 8, i32 10, i32 12, i32 14>
; CHECK-NEXT:    [[L4:%.*]] = fmul fast <8 x float> [[WIDE_VEC]], [[WIDE_VEC]]
; CHECK-NEXT:    [[L5:%.*]] = shufflevector <8 x float> [[L4]], <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-NEXT:    [[L6:%.*]] = fadd fast <4 x float> [[L5]], [[L3]]
; CHECK-NEXT:    ret <4 x float> [[L6]]
;
entry:
  %wide.vec = load <8 x float>, ptr %pSrc, align 4
  %l2 = fmul fast <8 x float> %wide.vec, %wide.vec
  %l3 = shufflevector <8 x float> undef, <8 x float> %l2, <4 x i32> <i32 8, i32 10, i32 12, i32 14>
  %l4 = fmul fast <8 x float> %wide.vec, %wide.vec
  %l5 = shufflevector <8 x float> %l4, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %l6 = fadd fast <4 x float> %l5, %l3
  ret <4 x float> %l6
}

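; noncanonical_extmask: the shuffle mask is full width and spans both source operands, so the
; IR is expected to stay unchanged.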
define void @noncanonical_extmask(ptr %p0, ptr %p1, ptr %p2) {
; CHECK-LABEL: @noncanonical_extmask(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V0:%.*]] = load <8 x i8>, ptr [[P0:%.*]], align 8
; CHECK-NEXT:    [[V1:%.*]] = add <8 x i8> [[V0]], <i8 0, i8 1, i8 2, i8 3, i8 7, i8 7, i8 7, i8 7>
; CHECK-NEXT:    [[V2:%.*]] = load <8 x i8>, ptr [[P1:%.*]], align 8
; CHECK-NEXT:    [[SHUFFLED:%.*]] = shufflevector <8 x i8> [[V2]], <8 x i8> [[V1]], <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK-NEXT:    store <8 x i8> [[SHUFFLED]], ptr [[P2:%.*]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %v0 = load <8 x i8>, ptr %p0
  %v1 = add <8 x i8> %v0, <i8 0, i8 1, i8 2, i8 3, i8 7, i8 7, i8 7, i8 7>
  %v2 = load <8 x i8>, ptr %p1
  %shuffled = shufflevector <8 x i8> %v2, <8 x i8> %v1, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  store <8 x i8> %shuffled, ptr %p2
  ret void
}

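; skip_optimizing_dead_binop: the fadd has no uses, so the pass should skip it and leave the
; load and its shuffles unchanged.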
define void @skip_optimizing_dead_binop(ptr %p0, ptr %p1) {
; CHECK-LABEL: @skip_optimizing_dead_binop(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V0:%.*]] = load <8 x double>, ptr [[P0:%.*]]
; CHECK-NEXT:    [[SHUFFLED_1:%.*]] = shufflevector <8 x double> [[V0]], <8 x double> undef, <2 x i32> <i32 0, i32 4>
; CHECK-NEXT:    [[SHUFFLED_2:%.*]] = shufflevector <8 x double> [[V0]], <8 x double> undef, <2 x i32> <i32 1, i32 5>
; CHECK-NEXT:    [[SHUFFLED_3:%.*]] = shufflevector <8 x double> [[V0]], <8 x double> undef, <2 x i32> <i32 2, i32 6>
; CHECK-NEXT:    [[SHUFFLED_4:%.*]] = shufflevector <8 x double> [[V0]], <8 x double> undef, <2 x i32> <i32 3, i32 7>
; CHECK-NEXT:    [[DEAD_BINOP:%.*]] = fadd <8 x double> [[V0]], [[V0]]
; CHECK-NEXT:    ret void
;
entry:
  %v0 = load <8 x double>, ptr %p0
  %shuffled_1 = shufflevector <8 x double> %v0, <8 x double> undef, <2 x i32> <i32 0, i32 4>
  %shuffled_2 = shufflevector <8 x double> %v0, <8 x double> undef, <2 x i32> <i32 1, i32 5>
  %shuffled_3 = shufflevector <8 x double> %v0, <8 x double> undef, <2 x i32> <i32 2, i32 6>
  %shuffled_4 = shufflevector <8 x double> %v0, <8 x double> undef, <2 x i32> <i32 3, i32 7>
  %dead_binop = fadd <8 x double> %v0, %v0
  ret void
}