# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx               -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=AVX
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2              -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=AVX
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f           -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=AVX512
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512bw -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=AVX512

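# These tests cover legalization of 512-bit wide vector G_OR. Without
# AVX-512 the legalizer narrows the operation to two 256-bit G_ORs and
# recombines the halves; with AVX-512 the 512-bit G_OR is already legal.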
--- |
  define void @test_or_v64i8() {
    %ret = or <64 x i8> undef, undef
    ret void
  }

  define void @test_or_v32i16() {
    %ret = or <32 x i16> undef, undef
    ret void
  }

  define void @test_or_v16i32() {
    %ret = or <16 x i32> undef, undef
    ret void
  }

  define void @test_or_v8i64() {
    %ret = or <8 x i64> undef, undef
    ret void
  }

  define <64 x i8> @test_or_v64i8_2(<64 x i8> %arg1, <64 x i8> %arg2) #0 {
    %ret = or <64 x i8> %arg1, %arg2
    ret <64 x i8> %ret
  }
...
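# <64 x s8>: split into two <32 x s8> G_ORs via G_UNMERGE_VALUES and
# rejoined with G_CONCAT_VECTORS on AVX/AVX2; kept whole on AVX512.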
---
name:            test_or_v64i8
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; AVX-LABEL: name: test_or_v64i8
    ; AVX: liveins: $zmm0, $zmm1
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
    ; AVX-NEXT: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
    ; AVX-NEXT: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
    ; AVX-NEXT: [[UV2:%[0-9]+]]:_(<32 x s8>), [[UV3:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
    ; AVX-NEXT: [[OR:%[0-9]+]]:_(<32 x s8>) = G_OR [[UV]], [[UV2]]
    ; AVX-NEXT: [[OR1:%[0-9]+]]:_(<32 x s8>) = G_OR [[UV1]], [[UV3]]
    ; AVX-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[OR]](<32 x s8>), [[OR1]](<32 x s8>)
    ; AVX-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<64 x s8>)
    ; AVX-NEXT: RET 0
    ; AVX512-LABEL: name: test_or_v64i8
    ; AVX512: liveins: $zmm0, $zmm1
    ; AVX512-NEXT: {{  $}}
    ; AVX512-NEXT: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
    ; AVX512-NEXT: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
    ; AVX512-NEXT: [[OR:%[0-9]+]]:_(<64 x s8>) = G_OR [[DEF]], [[DEF1]]
    ; AVX512-NEXT: $zmm0 = COPY [[OR]](<64 x s8>)
    ; AVX512-NEXT: RET 0
    %0(<64 x s8>) = IMPLICIT_DEF
    %1(<64 x s8>) = IMPLICIT_DEF
    %2(<64 x s8>) = G_OR %0, %1
    $zmm0 = COPY %2
    RET 0
...
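# <32 x s16>: narrowed to two <16 x s16> G_ORs on AVX/AVX2; kept whole on AVX512.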
---
name:            test_or_v32i16
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; AVX-LABEL: name: test_or_v32i16
    ; AVX: liveins: $zmm0, $zmm1
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
    ; AVX-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
    ; AVX-NEXT: [[UV:%[0-9]+]]:_(<16 x s16>), [[UV1:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
    ; AVX-NEXT: [[UV2:%[0-9]+]]:_(<16 x s16>), [[UV3:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
    ; AVX-NEXT: [[OR:%[0-9]+]]:_(<16 x s16>) = G_OR [[UV]], [[UV2]]
    ; AVX-NEXT: [[OR1:%[0-9]+]]:_(<16 x s16>) = G_OR [[UV1]], [[UV3]]
    ; AVX-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s16>) = G_CONCAT_VECTORS [[OR]](<16 x s16>), [[OR1]](<16 x s16>)
    ; AVX-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<32 x s16>)
    ; AVX-NEXT: RET 0
    ; AVX512-LABEL: name: test_or_v32i16
    ; AVX512: liveins: $zmm0, $zmm1
    ; AVX512-NEXT: {{  $}}
    ; AVX512-NEXT: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
    ; AVX512-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
    ; AVX512-NEXT: [[OR:%[0-9]+]]:_(<32 x s16>) = G_OR [[DEF]], [[DEF1]]
    ; AVX512-NEXT: $zmm0 = COPY [[OR]](<32 x s16>)
    ; AVX512-NEXT: RET 0
    %0(<32 x s16>) = IMPLICIT_DEF
    %1(<32 x s16>) = IMPLICIT_DEF
    %2(<32 x s16>) = G_OR %0, %1
    $zmm0 = COPY %2
    RET 0
...
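# <16 x s32>: narrowed to two <8 x s32> G_ORs on AVX/AVX2; kept whole on AVX512.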
---
name:            test_or_v16i32
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; AVX-LABEL: name: test_or_v16i32
    ; AVX: liveins: $zmm0, $zmm1
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
    ; AVX-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
    ; AVX-NEXT: [[UV:%[0-9]+]]:_(<8 x s32>), [[UV1:%[0-9]+]]:_(<8 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
    ; AVX-NEXT: [[UV2:%[0-9]+]]:_(<8 x s32>), [[UV3:%[0-9]+]]:_(<8 x s32>) = G_UNMERGE_VALUES [[DEF1]](<16 x s32>)
    ; AVX-NEXT: [[OR:%[0-9]+]]:_(<8 x s32>) = G_OR [[UV]], [[UV2]]
    ; AVX-NEXT: [[OR1:%[0-9]+]]:_(<8 x s32>) = G_OR [[UV1]], [[UV3]]
    ; AVX-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[OR]](<8 x s32>), [[OR1]](<8 x s32>)
    ; AVX-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<16 x s32>)
    ; AVX-NEXT: RET 0
    ; AVX512-LABEL: name: test_or_v16i32
    ; AVX512: liveins: $zmm0, $zmm1
    ; AVX512-NEXT: {{  $}}
    ; AVX512-NEXT: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
    ; AVX512-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
    ; AVX512-NEXT: [[OR:%[0-9]+]]:_(<16 x s32>) = G_OR [[DEF]], [[DEF1]]
    ; AVX512-NEXT: $zmm0 = COPY [[OR]](<16 x s32>)
    ; AVX512-NEXT: RET 0
    %0(<16 x s32>) = IMPLICIT_DEF
    %1(<16 x s32>) = IMPLICIT_DEF
    %2(<16 x s32>) = G_OR %0, %1
    $zmm0 = COPY %2
    RET 0
...
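# <8 x s64>: narrowed to two <4 x s64> G_ORs on AVX/AVX2; kept whole on AVX512.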
---
name:            test_or_v8i64
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; AVX-LABEL: name: test_or_v8i64
    ; AVX: liveins: $zmm0, $zmm1
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
    ; AVX-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
    ; AVX-NEXT: [[UV:%[0-9]+]]:_(<4 x s64>), [[UV1:%[0-9]+]]:_(<4 x s64>) = G_UNMERGE_VALUES [[DEF]](<8 x s64>)
    ; AVX-NEXT: [[UV2:%[0-9]+]]:_(<4 x s64>), [[UV3:%[0-9]+]]:_(<4 x s64>) = G_UNMERGE_VALUES [[DEF1]](<8 x s64>)
    ; AVX-NEXT: [[OR:%[0-9]+]]:_(<4 x s64>) = G_OR [[UV]], [[UV2]]
    ; AVX-NEXT: [[OR1:%[0-9]+]]:_(<4 x s64>) = G_OR [[UV1]], [[UV3]]
    ; AVX-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s64>) = G_CONCAT_VECTORS [[OR]](<4 x s64>), [[OR1]](<4 x s64>)
    ; AVX-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<8 x s64>)
    ; AVX-NEXT: RET 0
    ; AVX512-LABEL: name: test_or_v8i64
    ; AVX512: liveins: $zmm0, $zmm1
    ; AVX512-NEXT: {{  $}}
    ; AVX512-NEXT: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
    ; AVX512-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
    ; AVX512-NEXT: [[OR:%[0-9]+]]:_(<8 x s64>) = G_OR [[DEF]], [[DEF1]]
    ; AVX512-NEXT: $zmm0 = COPY [[OR]](<8 x s64>)
    ; AVX512-NEXT: RET 0
    %0(<8 x s64>) = IMPLICIT_DEF
    %1(<8 x s64>) = IMPLICIT_DEF
    %2(<8 x s64>) = G_OR %0, %1
    $zmm0 = COPY %2
    RET 0
...
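# Here the <64 x s8> operands arrive split across two 256-bit $ymm
# registers. The AVX checks show the G_CONCAT_VECTORS/G_UNMERGE_VALUES
# pair being folded away so the G_OR happens directly on the 256-bit
# halves; the AVX512 checks keep the concatenated 512-bit G_OR and only
# unmerge for the return-value copies.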
---
name:            test_or_v64i8_2
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
  - { id: 3, class: _ }
  - { id: 4, class: _ }
  - { id: 5, class: _ }
  - { id: 6, class: _ }
  - { id: 7, class: _ }
  - { id: 8, class: _ }
#
#
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1, $ymm2, $ymm3
    ; AVX-LABEL: name: test_or_v64i8_2
    ; AVX: liveins: $ymm0, $ymm1, $ymm2, $ymm3
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0
    ; AVX-NEXT: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1
    ; AVX-NEXT: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2
    ; AVX-NEXT: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3
    ; AVX-NEXT: [[OR:%[0-9]+]]:_(<32 x s8>) = G_OR [[COPY]], [[COPY2]]
    ; AVX-NEXT: [[OR1:%[0-9]+]]:_(<32 x s8>) = G_OR [[COPY1]], [[COPY3]]
    ; AVX-NEXT: $ymm0 = COPY [[OR]](<32 x s8>)
    ; AVX-NEXT: $ymm1 = COPY [[OR1]](<32 x s8>)
    ; AVX-NEXT: RET 0, implicit $ymm0, implicit $ymm1
    ; AVX512-LABEL: name: test_or_v64i8_2
    ; AVX512: liveins: $ymm0, $ymm1, $ymm2, $ymm3
    ; AVX512-NEXT: {{  $}}
    ; AVX512-NEXT: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0
    ; AVX512-NEXT: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1
    ; AVX512-NEXT: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2
    ; AVX512-NEXT: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3
    ; AVX512-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[COPY]](<32 x s8>), [[COPY1]](<32 x s8>)
    ; AVX512-NEXT: [[CONCAT_VECTORS1:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[COPY2]](<32 x s8>), [[COPY3]](<32 x s8>)
    ; AVX512-NEXT: [[OR:%[0-9]+]]:_(<64 x s8>) = G_OR [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
    ; AVX512-NEXT: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[OR]](<64 x s8>)
    ; AVX512-NEXT: $ymm0 = COPY [[UV]](<32 x s8>)
    ; AVX512-NEXT: $ymm1 = COPY [[UV1]](<32 x s8>)
    ; AVX512-NEXT: RET 0, implicit $ymm0, implicit $ymm1
    %2(<32 x s8>) = COPY $ymm0
    %3(<32 x s8>) = COPY $ymm1
    %4(<32 x s8>) = COPY $ymm2
    %5(<32 x s8>) = COPY $ymm3
    %0(<64 x s8>) = G_CONCAT_VECTORS %2(<32 x s8>), %3(<32 x s8>)
    %1(<64 x s8>) = G_CONCAT_VECTORS %4(<32 x s8>), %5(<32 x s8>)
    %6(<64 x s8>) = G_OR %0, %1
    %7(<32 x s8>), %8(<32 x s8>) = G_UNMERGE_VALUES %6(<64 x s8>)
    $ymm0 = COPY %7(<32 x s8>)
    $ymm1 = COPY %8(<32 x s8>)
    RET 0, implicit $ymm0, implicit $ymm1
...