xref: /llvm-project/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-ext.mir (revision 363ec6f6911afe5b2ab640d6a7d778908c58b3bd)
1# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s
3#
4# Check that we can combine a G_SHUFFLE_VECTOR into a G_EXT.
5
6...
7---
8name:            v8s8_cst3
9alignment:       4
10legalized:       true
11tracksRegLiveness: true
12body:             |
13  bb.0:
14    liveins: $d0, $d1
    ; Mask (3, 4, ..., 10) reads 8 consecutive bytes of concat(%v1, %v2)
    ; starting at byte 3, so this lowers to G_EXT %v1, %v2 with immediate 3.
    ;
15    ; CHECK-LABEL: name: v8s8_cst3
16    ; CHECK: liveins: $d0, $d1
17    ; CHECK-NEXT: {{  $}}
18    ; CHECK-NEXT: %v1:_(<8 x s8>) = COPY $d0
19    ; CHECK-NEXT: %v2:_(<8 x s8>) = COPY $d1
20    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
21    ; CHECK-NEXT: %shuf:_(<8 x s8>) = G_EXT %v1, %v2, [[C]](s32)
22    ; CHECK-NEXT: $d0 = COPY %shuf(<8 x s8>)
23    ; CHECK-NEXT: RET_ReallyLR implicit $d0
24    %v1:_(<8 x s8>) = COPY $d0
25    %v2:_(<8 x s8>) = COPY $d1
26    %shuf:_(<8 x s8>) = G_SHUFFLE_VECTOR %v1(<8 x s8>), %v2, shufflemask(3, 4, 5, 6, 7, 8, 9, 10)
27    $d0 = COPY %shuf(<8 x s8>)
28    RET_ReallyLR implicit $d0
29...
30---
31name:            v8s8_cst5
32alignment:       4
33legalized:       true
34tracksRegLiveness: true
35body:             |
36  bb.0:
37    liveins: $d0, $d1
    ; Mask (13, 14, 15, 0, ..., 4) starts at element 13, i.e. byte 5 of %v2,
    ; so the combine must swap the operands: G_EXT %v2, %v1 with immediate
    ; 13 - 8 = 5.
    ;
38    ; CHECK-LABEL: name: v8s8_cst5
39    ; CHECK: liveins: $d0, $d1
40    ; CHECK-NEXT: {{  $}}
41    ; CHECK-NEXT: %v1:_(<8 x s8>) = COPY $d0
42    ; CHECK-NEXT: %v2:_(<8 x s8>) = COPY $d1
43    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
44    ; CHECK-NEXT: %shuf:_(<8 x s8>) = G_EXT %v2, %v1, [[C]](s32)
45    ; CHECK-NEXT: $d0 = COPY %shuf(<8 x s8>)
46    ; CHECK-NEXT: RET_ReallyLR implicit $d0
47    %v1:_(<8 x s8>) = COPY $d0
48    %v2:_(<8 x s8>) = COPY $d1
49    %shuf:_(<8 x s8>) = G_SHUFFLE_VECTOR %v1(<8 x s8>), %v2, shufflemask(13, 14, 15, 0, 1, 2, 3, 4)
50    $d0 = COPY %shuf(<8 x s8>)
51    RET_ReallyLR implicit $d0
52...
53---
54name:            v16s8_cst3
55alignment:       4
56legalized:       true
57tracksRegLiveness: true
58body:             |
59  bb.0:
60    liveins: $q0, $q1
    ; Same as v8s8_cst3 but with a 128-bit vector: mask (3, ..., 18) reads
    ; concat(%v1, %v2) starting at byte 3 -> G_EXT %v1, %v2, 3.
    ;
61    ; CHECK-LABEL: name: v16s8_cst3
62    ; CHECK: liveins: $q0, $q1
63    ; CHECK-NEXT: {{  $}}
64    ; CHECK-NEXT: %v1:_(<16 x s8>) = COPY $q0
65    ; CHECK-NEXT: %v2:_(<16 x s8>) = COPY $q1
66    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
67    ; CHECK-NEXT: %shuf:_(<16 x s8>) = G_EXT %v1, %v2, [[C]](s32)
68    ; CHECK-NEXT: $q0 = COPY %shuf(<16 x s8>)
69    ; CHECK-NEXT: RET_ReallyLR implicit $q0
70    %v1:_(<16 x s8>) = COPY $q0
71    %v2:_(<16 x s8>) = COPY $q1
72    %shuf:_(<16 x s8>) = G_SHUFFLE_VECTOR %v1(<16 x s8>), %v2, shufflemask(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)
73    $q0 = COPY %shuf(<16 x s8>)
74    RET_ReallyLR implicit $q0
75...
76---
77name:            v16s8_cst7
78alignment:       4
79legalized:       true
80tracksRegLiveness: true
81body:             |
82  bb.0:
83    liveins: $q0, $q1
    ; Mask starts at element 23, i.e. byte 7 of %v2, so the operands are
    ; swapped: G_EXT %v2, %v1 with immediate 23 - 16 = 7.
    ;
84    ; CHECK-LABEL: name: v16s8_cst7
85    ; CHECK: liveins: $q0, $q1
86    ; CHECK-NEXT: {{  $}}
87    ; CHECK-NEXT: %v1:_(<16 x s8>) = COPY $q0
88    ; CHECK-NEXT: %v2:_(<16 x s8>) = COPY $q1
89    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
90    ; CHECK-NEXT: %shuf:_(<16 x s8>) = G_EXT %v2, %v1, [[C]](s32)
91    ; CHECK-NEXT: $q0 = COPY %shuf(<16 x s8>)
92    ; CHECK-NEXT: RET_ReallyLR implicit $q0
93    %v1:_(<16 x s8>) = COPY $q0
94    %v2:_(<16 x s8>) = COPY $q1
95    %shuf:_(<16 x s8>) = G_SHUFFLE_VECTOR %v1(<16 x s8>), %v2, shufflemask(23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6)
96    $q0 = COPY %shuf(<16 x s8>)
97    RET_ReallyLR implicit $q0
98...
99---
100name:            v4s16_cst6
101alignment:       4
102legalized:       true
103tracksRegLiveness: true
104body:             |
105  bb.0:
106    liveins: $d0, $d1
    ; The G_EXT immediate is in bytes: mask starts at element 3 of a <4 x s16>
    ; vector, so the immediate is 3 * 2 = 6.
    ;
107    ; CHECK-LABEL: name: v4s16_cst6
108    ; CHECK: liveins: $d0, $d1
109    ; CHECK-NEXT: {{  $}}
110    ; CHECK-NEXT: %v1:_(<4 x s16>) = COPY $d0
111    ; CHECK-NEXT: %v2:_(<4 x s16>) = COPY $d1
112    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
113    ; CHECK-NEXT: %shuf:_(<4 x s16>) = G_EXT %v1, %v2, [[C]](s32)
114    ; CHECK-NEXT: $d0 = COPY %shuf(<4 x s16>)
115    ; CHECK-NEXT: RET_ReallyLR implicit $d0
116    %v1:_(<4 x s16>) = COPY $d0
117    %v2:_(<4 x s16>) = COPY $d1
118    %shuf:_(<4 x s16>) = G_SHUFFLE_VECTOR %v1(<4 x s16>), %v2, shufflemask(3, 4, 5, 6)
119    $d0 = COPY %shuf(<4 x s16>)
120    RET_ReallyLR implicit $d0
121...
122---
123name:            v4s32_cst12
124alignment:       4
125legalized:       true
126tracksRegLiveness: true
127body:             |
128  bb.0:
129    liveins: $q0, $q1
    ; Same mask as v4s16_cst6 but with 32-bit elements: the byte immediate
    ; scales with the element size, 3 * 4 = 12.
    ;
130    ; CHECK-LABEL: name: v4s32_cst12
131    ; CHECK: liveins: $q0, $q1
132    ; CHECK-NEXT: {{  $}}
133    ; CHECK-NEXT: %v1:_(<4 x s32>) = COPY $q0
134    ; CHECK-NEXT: %v2:_(<4 x s32>) = COPY $q1
135    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
136    ; CHECK-NEXT: %shuf:_(<4 x s32>) = G_EXT %v1, %v2, [[C]](s32)
137    ; CHECK-NEXT: $q0 = COPY %shuf(<4 x s32>)
138    ; CHECK-NEXT: RET_ReallyLR implicit $q0
139    %v1:_(<4 x s32>) = COPY $q0
140    %v2:_(<4 x s32>) = COPY $q1
141    %shuf:_(<4 x s32>) = G_SHUFFLE_VECTOR %v1(<4 x s32>), %v2, shufflemask(3, 4, 5, 6)
142    $q0 = COPY %shuf(<4 x s32>)
143    RET_ReallyLR implicit $q0
144...
145---
146name:            undef_elts_should_match_1
147alignment:       4
148legalized:       true
149tracksRegLiveness: true
150body:             |
151  bb.0:
152    liveins: $d0, $d1
153    ; Undef shuffle indices should not prevent matching G_EXT.
154    ; We should get a constant 3 here.
    ; The -1 (undef) lanes at positions 1 and 2 are ignored; the remaining
    ; lanes all fit the pattern "element i + 3", matching G_EXT %v1, %v2, 3.
155    ;
156    ; CHECK-LABEL: name: undef_elts_should_match_1
157    ; CHECK: liveins: $d0, $d1
158    ; CHECK-NEXT: {{  $}}
159    ; CHECK-NEXT: %v1:_(<8 x s8>) = COPY $d0
160    ; CHECK-NEXT: %v2:_(<8 x s8>) = COPY $d1
161    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
162    ; CHECK-NEXT: %shuf:_(<8 x s8>) = G_EXT %v1, %v2, [[C]](s32)
163    ; CHECK-NEXT: $d0 = COPY %shuf(<8 x s8>)
164    ; CHECK-NEXT: RET_ReallyLR implicit $d0
165    %v1:_(<8 x s8>) = COPY $d0
166    %v2:_(<8 x s8>) = COPY $d1
167    %shuf:_(<8 x s8>) = G_SHUFFLE_VECTOR %v1(<8 x s8>), %v2, shufflemask(3, -1, -1, 6, 7, 8, 9, 10)
168    $d0 = COPY %shuf(<8 x s8>)
169    RET_ReallyLR implicit $d0
170...
171---
172name:            undef_elts_should_match_2
173alignment:       4
174legalized:       true
175tracksRegLiveness: true
176body:             |
177  bb.0:
178    liveins: $d0, $d1
179    ; Undef shuffle indices should not prevent matching G_EXT.
180    ; We should get a constant 6 here.
    ; Defined lanes at positions 4..7 read elements 2..5, i.e. "i + 14 mod 16":
    ; start element 14 lies in %v2, so G_EXT %v2, %v1 with immediate 14 - 8 = 6.
181    ;
182    ; CHECK-LABEL: name: undef_elts_should_match_2
183    ; CHECK: liveins: $d0, $d1
184    ; CHECK-NEXT: {{  $}}
185    ; CHECK-NEXT: %v1:_(<8 x s8>) = COPY $d0
186    ; CHECK-NEXT: %v2:_(<8 x s8>) = COPY $d1
187    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
188    ; CHECK-NEXT: %shuf:_(<8 x s8>) = G_EXT %v2, %v1, [[C]](s32)
189    ; CHECK-NEXT: $d0 = COPY %shuf(<8 x s8>)
190    ; CHECK-NEXT: RET_ReallyLR implicit $d0
191    %v1:_(<8 x s8>) = COPY $d0
192    %v2:_(<8 x s8>) = COPY $d1
193    %shuf:_(<8 x s8>) = G_SHUFFLE_VECTOR %v1(<8 x s8>), %v2, shufflemask(-1, -1, -1, -1, 2, 3, 4, 5)
194    $d0 = COPY %shuf(<8 x s8>)
195    RET_ReallyLR implicit $d0
196...
197---
198name:            undef_elts_should_match_3
199alignment:       4
200legalized:       true
201tracksRegLiveness: true
202body:             |
203  bb.0:
204    liveins: $q0, $q1
205    ; Undef shuffle indices should not prevent matching G_EXT.
206    ; We should get a constant 7 here.
    ; Defined lanes follow "element i + 23 mod 32"; start element 23 lies in
    ; %v2, so the operands are swapped and the immediate is 23 - 16 = 7.
    ;
207    ; CHECK-LABEL: name: undef_elts_should_match_3
208    ; CHECK: liveins: $q0, $q1
209    ; CHECK-NEXT: {{  $}}
210    ; CHECK-NEXT: %v1:_(<16 x s8>) = COPY $q0
211    ; CHECK-NEXT: %v2:_(<16 x s8>) = COPY $q1
212    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
213    ; CHECK-NEXT: %shuf:_(<16 x s8>) = G_EXT %v2, %v1, [[C]](s32)
214    ; CHECK-NEXT: $q0 = COPY %shuf(<16 x s8>)
215    ; CHECK-NEXT: RET_ReallyLR implicit $q0
216    %v1:_(<16 x s8>) = COPY $q0
217    %v2:_(<16 x s8>) = COPY $q1
218    %shuf:_(<16 x s8>) = G_SHUFFLE_VECTOR %v1(<16 x s8>), %v2, shufflemask(23, 24, 25, 26, -1, -1, 29, 30, 31, 0, 1, 2, 3, 4, -1, 6)
219    $q0 = COPY %shuf(<16 x s8>)
220    RET_ReallyLR implicit $q0
221...
222---
223name:            undef_elts_should_match_4
224alignment:       4
225legalized:       true
226tracksRegLiveness: true
227body:             |
228  bb.0:
229    liveins: $q0, $q1
230    ; Undef shuffle indices should not prevent matching G_EXT.
231    ; We should get a constant 10 here.
    ; Defined lanes at positions 4..7 read elements 1..4, i.e. start element
    ; 13 mod 16, which is element 5 of %v2: immediate = 5 * 2 bytes = 10.
    ;
232    ; CHECK-LABEL: name: undef_elts_should_match_4
233    ; CHECK: liveins: $q0, $q1
234    ; CHECK-NEXT: {{  $}}
235    ; CHECK-NEXT: %v1:_(<8 x s16>) = COPY $q0
236    ; CHECK-NEXT: %v2:_(<8 x s16>) = COPY $q1
237    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
238    ; CHECK-NEXT: %shuf:_(<8 x s16>) = G_EXT %v2, %v1, [[C]](s32)
239    ; CHECK-NEXT: $q0 = COPY %shuf(<8 x s16>)
240    ; CHECK-NEXT: RET_ReallyLR implicit $q0
241    %v1:_(<8 x s16>) = COPY $q0
242    %v2:_(<8 x s16>) = COPY $q1
243    %shuf:_(<8 x s16>) = G_SHUFFLE_VECTOR %v1(<8 x s16>), %v2, shufflemask(-1, -1, -1, -1, 1, 2, 3, 4)
244    $q0 = COPY %shuf(<8 x s16>)
245    RET_ReallyLR implicit $q0
246...
247---
248name:            all_undef
249alignment:       4
250legalized:       true
251tracksRegLiveness: true
252body:             |
253  bb.0:
254    liveins: $q0, $q1
255    ; We expect at least one defined element in the shuffle mask.
    ; With every lane undef there is no constraint to derive an EXT immediate
    ; from, so the G_EXT combine must not fire (another lowering is used).
256    ;
257    ; CHECK-LABEL: name: all_undef
258    ; CHECK: liveins: $q0, $q1
259    ; CHECK-NEXT: {{  $}}
260    ; CHECK-NEXT: %v1:_(<8 x s16>) = COPY $q0
261    ; CHECK-NEXT: %shuf:_(<8 x s16>) = G_REV64 %v1
262    ; CHECK-NEXT: $q0 = COPY %shuf(<8 x s16>)
263    ; CHECK-NEXT: RET_ReallyLR implicit $q0
264    %v1:_(<8 x s16>) = COPY $q0
265    %v2:_(<8 x s16>) = COPY $q1
266    %shuf:_(<8 x s16>) = G_SHUFFLE_VECTOR %v1(<8 x s16>), %v2, shufflemask(-1, -1, -1, -1, -1, -1, -1, -1)
267    $q0 = COPY %shuf(<8 x s16>)
268    RET_ReallyLR implicit $q0
269...
270---
271name:            v2s64_singleton_ext
272alignment:       4
273legalized:       true
274tracksRegLiveness: true
275body:             |
276  bb.0:
277    liveins: $q0
    ; With an undef second operand, mask (1, 0) rotates %v1 by one element:
    ; this matches G_EXT %v1, %v1 with immediate 1 * 8 bytes = 8.
    ;
278    ; CHECK-LABEL: name: v2s64_singleton_ext
279    ; CHECK: liveins: $q0
280    ; CHECK-NEXT: {{  $}}
281    ; CHECK-NEXT: %v1:_(<2 x s64>) = COPY $q0
282    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
283    ; CHECK-NEXT: %shuf:_(<2 x s64>) = G_EXT %v1, %v1, [[C]](s32)
284    ; CHECK-NEXT: $q0 = COPY %shuf(<2 x s64>)
285    ; CHECK-NEXT: RET_ReallyLR implicit $q0
286    %v1:_(<2 x s64>) = COPY $q0
287    %v2:_(<2 x s64>) = G_IMPLICIT_DEF
288    %shuf:_(<2 x s64>) = G_SHUFFLE_VECTOR %v1(<2 x s64>), %v2, shufflemask(1, 0)
289    $q0 = COPY %shuf(<2 x s64>)
290    RET_ReallyLR implicit $q0
291...
292---
293name:            v2s64_singleton_ext_all_undef
294alignment:       4
295legalized:       true
296tracksRegLiveness: true
297body:             |
298  bb.0:
299    liveins: $q0
    ; A fully-undef mask gives the singleton-EXT combine nothing to match, so
    ; the shuffle must fall through to a different lowering instead.
    ;
300    ; CHECK-LABEL: name: v2s64_singleton_ext_all_undef
301    ; CHECK: liveins: $q0
302    ; CHECK-NEXT: {{  $}}
303    ; CHECK-NEXT: %v1:_(<2 x s64>) = COPY $q0
304    ; CHECK-NEXT: %v2:_(<2 x s64>) = G_IMPLICIT_DEF
305    ; CHECK-NEXT: %shuf:_(<2 x s64>) = G_TRN2 %v1, %v2
306    ; CHECK-NEXT: $q0 = COPY %shuf(<2 x s64>)
307    ; CHECK-NEXT: RET_ReallyLR implicit $q0
308    %v1:_(<2 x s64>) = COPY $q0
309    %v2:_(<2 x s64>) = G_IMPLICIT_DEF
310    %shuf:_(<2 x s64>) = G_SHUFFLE_VECTOR %v1(<2 x s64>), %v2, shufflemask(undef, undef)
311    $q0 = COPY %shuf(<2 x s64>)
312    RET_ReallyLR implicit $q0
313...
314---
315name:            v2s64_singleton_ext_same
316alignment:       4
317legalized:       true
318tracksRegLiveness: true
319body:             |
320  bb.0:
321    liveins: $q0
    ; Mask (1, 1) broadcasts a single lane, which is not a rotation, so the
    ; singleton-EXT combine must not fire; a lane duplicate is used instead.
    ;
322    ; CHECK-LABEL: name: v2s64_singleton_ext_same
323    ; CHECK: liveins: $q0
324    ; CHECK-NEXT: {{  $}}
325    ; CHECK-NEXT: %v1:_(<2 x s64>) = COPY $q0
326    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
327    ; CHECK-NEXT: %shuf:_(<2 x s64>) = G_DUPLANE64 %v1, [[C]](s64)
328    ; CHECK-NEXT: $q0 = COPY %shuf(<2 x s64>)
329    ; CHECK-NEXT: RET_ReallyLR implicit $q0
330    %v1:_(<2 x s64>) = COPY $q0
331    %v2:_(<2 x s64>) = G_IMPLICIT_DEF
332    %shuf:_(<2 x s64>) = G_SHUFFLE_VECTOR %v1(<2 x s64>), %v2, shufflemask(1, 1)
333    $q0 = COPY %shuf(<2 x s64>)
334    RET_ReallyLR implicit $q0
335...
336