xref: /llvm-project/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir (revision c63be92fc80c70d56f22abc5aa024ab957f8d4cd)
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=SSE2
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx  -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX1
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX2

# TODO: add tests for additional configurations once their legalization is supported
8--- |
9  define void @test_add_v32i8() {
10    %ret = add <32 x i8> undef, undef
11    ret void
12  }
13
14  define void @test_add_v16i16() {
15    %ret = add <16 x i16> undef, undef
16    ret void
17  }
18
19  define void @test_add_v8i32() {
20    %ret = add <8 x i32> undef, undef
21    ret void
22  }
23
24  define void @test_add_v4i64() {
25    %ret = add <4 x i64> undef, undef
26    ret void
27  }
28
29...
30---
31name:            test_add_v32i8
32alignment:       16
33legalized:       false
34regBankSelected: false
35registers:
36  - { id: 0, class: _ }
37  - { id: 1, class: _ }
38  - { id: 2, class: _ }
39body:             |
40  bb.1 (%ir-block.0):
41    liveins: $ymm0, $ymm1
42
43    ; SSE2-LABEL: name: test_add_v32i8
44    ; SSE2: liveins: $ymm0, $ymm1
45    ; SSE2-NEXT: {{  $}}
46    ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
47    ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
48    ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
49    ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
50    ; SSE2-NEXT: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
51    ; SSE2-NEXT: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
52    ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
53    ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
54    ; SSE2-NEXT: RET 0
55    ;
56    ; AVX1-LABEL: name: test_add_v32i8
57    ; AVX1: liveins: $ymm0, $ymm1
58    ; AVX1-NEXT: {{  $}}
59    ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
60    ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
61    ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
62    ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
63    ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
64    ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
65    ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
66    ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
67    ; AVX1-NEXT: RET 0
68    ;
69    ; AVX2-LABEL: name: test_add_v32i8
70    ; AVX2: liveins: $ymm0, $ymm1
71    ; AVX2-NEXT: {{  $}}
72    ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
73    ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
74    ; AVX2-NEXT: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[DEF]], [[DEF1]]
75    ; AVX2-NEXT: $ymm0 = COPY [[ADD]](<32 x s8>)
76    ; AVX2-NEXT: RET 0
77    %0(<32 x s8>) = IMPLICIT_DEF
78    %1(<32 x s8>) = IMPLICIT_DEF
79    %2(<32 x s8>) = G_ADD %0, %1
80    $ymm0 = COPY %2
81    RET 0
82
83...
84---
85name:            test_add_v16i16
86alignment:       16
87legalized:       false
88regBankSelected: false
89registers:
90  - { id: 0, class: _ }
91  - { id: 1, class: _ }
92  - { id: 2, class: _ }
93body:             |
94  bb.1 (%ir-block.0):
95    liveins: $ymm0, $ymm1
96
97    ; SSE2-LABEL: name: test_add_v16i16
98    ; SSE2: liveins: $ymm0, $ymm1
99    ; SSE2-NEXT: {{  $}}
100    ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
101    ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
102    ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
103    ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
104    ; SSE2-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
105    ; SSE2-NEXT: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
106    ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
107    ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
108    ; SSE2-NEXT: RET 0
109    ;
110    ; AVX1-LABEL: name: test_add_v16i16
111    ; AVX1: liveins: $ymm0, $ymm1
112    ; AVX1-NEXT: {{  $}}
113    ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
114    ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
115    ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
116    ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
117    ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
118    ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
119    ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
120    ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
121    ; AVX1-NEXT: RET 0
122    ;
123    ; AVX2-LABEL: name: test_add_v16i16
124    ; AVX2: liveins: $ymm0, $ymm1
125    ; AVX2-NEXT: {{  $}}
126    ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
127    ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
128    ; AVX2-NEXT: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[DEF]], [[DEF1]]
129    ; AVX2-NEXT: $ymm0 = COPY [[ADD]](<16 x s16>)
130    ; AVX2-NEXT: RET 0
131    %0(<16 x s16>) = IMPLICIT_DEF
132    %1(<16 x s16>) = IMPLICIT_DEF
133    %2(<16 x s16>) = G_ADD %0, %1
134    $ymm0 = COPY %2
135    RET 0
136
137...
138---
139name:            test_add_v8i32
140alignment:       16
141legalized:       false
142regBankSelected: false
143registers:
144  - { id: 0, class: _ }
145  - { id: 1, class: _ }
146  - { id: 2, class: _ }
147body:             |
148  bb.1 (%ir-block.0):
149    liveins: $ymm0, $ymm1
150
151    ; SSE2-LABEL: name: test_add_v8i32
152    ; SSE2: liveins: $ymm0, $ymm1
153    ; SSE2-NEXT: {{  $}}
154    ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
155    ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
156    ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
157    ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
158    ; SSE2-NEXT: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
159    ; SSE2-NEXT: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
160    ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
161    ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
162    ; SSE2-NEXT: RET 0
163    ;
164    ; AVX1-LABEL: name: test_add_v8i32
165    ; AVX1: liveins: $ymm0, $ymm1
166    ; AVX1-NEXT: {{  $}}
167    ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
168    ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
169    ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
170    ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
171    ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
172    ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
173    ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
174    ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
175    ; AVX1-NEXT: RET 0
176    ;
177    ; AVX2-LABEL: name: test_add_v8i32
178    ; AVX2: liveins: $ymm0, $ymm1
179    ; AVX2-NEXT: {{  $}}
180    ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
181    ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
182    ; AVX2-NEXT: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[DEF]], [[DEF1]]
183    ; AVX2-NEXT: $ymm0 = COPY [[ADD]](<8 x s32>)
184    ; AVX2-NEXT: RET 0
185    %0(<8 x s32>) = IMPLICIT_DEF
186    %1(<8 x s32>) = IMPLICIT_DEF
187    %2(<8 x s32>) = G_ADD %0, %1
188    $ymm0 = COPY %2
189    RET 0
190
191...
192---
193name:            test_add_v4i64
194alignment:       16
195legalized:       false
196regBankSelected: false
197registers:
198  - { id: 0, class: _ }
199  - { id: 1, class: _ }
200  - { id: 2, class: _ }
201body:             |
202  bb.1 (%ir-block.0):
203    liveins: $ymm0, $ymm1
204
205    ; SSE2-LABEL: name: test_add_v4i64
206    ; SSE2: liveins: $ymm0, $ymm1
207    ; SSE2-NEXT: {{  $}}
208    ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
209    ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
210    ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
211    ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
212    ; SSE2-NEXT: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
213    ; SSE2-NEXT: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
214    ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
215    ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
216    ; SSE2-NEXT: RET 0
217    ;
218    ; AVX1-LABEL: name: test_add_v4i64
219    ; AVX1: liveins: $ymm0, $ymm1
220    ; AVX1-NEXT: {{  $}}
221    ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
222    ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
223    ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
224    ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
225    ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
226    ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
227    ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
228    ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
229    ; AVX1-NEXT: RET 0
230    ;
231    ; AVX2-LABEL: name: test_add_v4i64
232    ; AVX2: liveins: $ymm0, $ymm1
233    ; AVX2-NEXT: {{  $}}
234    ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
235    ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
236    ; AVX2-NEXT: [[ADD:%[0-9]+]]:_(<4 x s64>) = G_ADD [[DEF]], [[DEF1]]
237    ; AVX2-NEXT: $ymm0 = COPY [[ADD]](<4 x s64>)
238    ; AVX2-NEXT: RET 0
239    %0(<4 x s64>) = IMPLICIT_DEF
240    %1(<4 x s64>) = IMPLICIT_DEF
241    %2(<4 x s64>) = G_ADD %0, %1
242    $ymm0 = COPY %2
243    RET 0
244
245...