# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s

---
# G_OR of two constants (255 | 15) is constant-folded to the single
# constant 255 (15's bits are a subset of 255's).
name:            test_const_const_1
tracksRegLiveness: true
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_const_const_1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK-NEXT: $sgpr0 = COPY [[C]](s32)
    ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0
    %0:_(s32) = G_CONSTANT i32 255
    %1:_(s32) = G_CONSTANT i32 15
    %2:_(s32) = G_OR %0(s32), %1(s32)
    $sgpr0 = COPY %2(s32)
    SI_RETURN_TO_EPILOG implicit $sgpr0
...

---
# Same as test_const_const_1 but with the constant operands in the
# opposite order (15 | 255); still folds to 255.
name:            test_const_const_2
tracksRegLiveness: true
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_const_const_2
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK-NEXT: $vgpr0 = COPY [[C]](s32)
    ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
    %0:_(s32) = G_CONSTANT i32 15
    %1:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_OR %0(s32), %1(s32)
    $vgpr0 = COPY %2(s32)
    SI_RETURN_TO_EPILOG implicit $vgpr0
...

---
# 0x55555555 | 0x44444444: every set bit of 0x44444444 is already set in
# 0x55555555, so the OR folds to 1431655765 (0x55555555).
name:            test_const_const_3
tracksRegLiveness: true
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_const_const_3
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
    ; CHECK-NEXT: $vgpr0 = COPY [[C]](s32)
    ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
    %0:_(s32) = G_CONSTANT i32 1431655765
    %1:_(s32) = G_CONSTANT i32 1145324612
    %2:_(s32) = G_OR %1(s32), %0(s32)
    $vgpr0 = COPY %2(s32)
    SI_RETURN_TO_EPILOG implicit $vgpr0
...

---
# (x | 255) | 15: the outer OR is redundant because 15's bits are already
# guaranteed set by the inner OR with 255; only one G_OR remains.
name:            test_or_or
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $vgpr0

    ; CHECK-LABEL: name: test_or_or
    ; CHECK: liveins: $vgpr0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[C]]
    ; CHECK-NEXT: $vgpr0 = COPY [[OR]](s32)
    ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_CONSTANT i32 15
    %3:_(s32) = G_OR %0, %1(s32)
    %4:_(s32) = G_OR %3, %2
    $vgpr0 = COPY %4(s32)
    SI_RETURN_TO_EPILOG implicit $vgpr0
...

---
# ~(x << 5) | 31: the shift-left by 5 zeroes the low 5 bits, so the NOT
# (XOR with -1) guarantees they are all ones; the OR with 31 is redundant
# and is removed, leaving just the SHL + XOR.
name:            test_shl_xor_or
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $sgpr0

    ; CHECK-LABEL: name: test_shl_xor_or
    ; CHECK: liveins: $sgpr0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SHL]], [[C1]]
    ; CHECK-NEXT: $sgpr0 = COPY [[XOR]](s32)
    ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = G_CONSTANT i32 5
    %2:_(s32) = G_CONSTANT i32 -1
    %3:_(s32) = G_CONSTANT i32 31
    %4:_(s32) = G_SHL %0, %1(s32)
    %5:_(s32) = G_XOR %4(s32), %2(s32)
    %6:_(s32) = G_OR %5(s32), %3(s32)
    $sgpr0 = COPY %6(s32)
    SI_RETURN_TO_EPILOG implicit $sgpr0
...

---
# ~(x >> 5) | 0xF8000000: the logical shift-right by 5 zeroes the high
# 5 bits, so after the NOT they are all ones; the OR with 4160749568
# (0xF8000000) is redundant and is removed.
name:            test_lshr_xor_or
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $vgpr0

    ; CHECK-LABEL: name: test_lshr_xor_or
    ; CHECK: liveins: $vgpr0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[LSHR]], [[C1]]
    ; CHECK-NEXT: $vgpr0 = COPY [[XOR]](s32)
    ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = G_CONSTANT i32 5
    %2:_(s32) = G_CONSTANT i32 -1
    %3:_(s32) = G_CONSTANT i32 4160749568
    %4:_(s32) = G_LSHR %0, %1(s32)
    %5:_(s32) = G_XOR %4(s32), %2(s32)
    %6:_(s32) = G_OR %5(s32), %3(s32)
    $vgpr0 = COPY %6(s32)
    SI_RETURN_TO_EPILOG implicit $vgpr0
...

---
# ~(x >> 16) | (y & 0xFFFF0000): the OR is removed even though its RHS is
# not a constant — the AND's possibly-set bits (high 16) are all known-one
# in ~(x >> 16), so only the LSHR + XOR survive.
name:            test_or_non_const
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $sgpr0, $sgpr1

    ; CHECK-LABEL: name: test_or_non_const
    ; CHECK: liveins: $sgpr0, $sgpr1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[LSHR]], [[C1]]
    ; CHECK-NEXT: $sgpr0 = COPY [[XOR]](s32)
    ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = COPY $sgpr1
    %2:_(s32) = G_CONSTANT i32 16
    %3:_(s32) = G_CONSTANT i32 -1
    %4:_(s32) = G_CONSTANT i32 4294901760
    %5:_(s32) = G_LSHR %0, %2(s32)
    %6:_(s32) = G_XOR %5, %3(s32)
    %7:_(s32) = G_AND %1, %4(s32)
    %8:_(s32) = G_OR %6, %7
    $sgpr0 = COPY %8(s32)
    SI_RETURN_TO_EPILOG implicit $sgpr0
...
---
# Vector case: splat(15) | splat(255) folds to the splat(255) build-vector,
# since each 15 element's bits are a subset of 255.
name:            vector_const_splat_const_splat
tracksRegLiveness: true
body:             |
  bb.0:
    ; CHECK-LABEL: name: vector_const_splat_const_splat
    ; CHECK: %mask:_(s16) = G_CONSTANT i16 255
    ; CHECK-NEXT: %c2:_(<2 x s16>) = G_BUILD_VECTOR %mask(s16), %mask(s16)
    ; CHECK-NEXT: $vgpr0 = COPY %c2(<2 x s16>)
    ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
    %mask:_(s16) = G_CONSTANT i16 255
    %fifteen:_(s16) = G_CONSTANT i16 15
    %c1:_(<2 x s16>) = G_BUILD_VECTOR %fifteen, %fifteen
    %c2:_(<2 x s16>) = G_BUILD_VECTOR %mask, %mask
    %and:_(<2 x s16>) = G_OR %c1(<2 x s16>), %c2(<2 x s16>)
    $vgpr0 = COPY %and(<2 x s16>)
    SI_RETURN_TO_EPILOG implicit $vgpr0
...
---
# Non-splat LHS (15, 16): both elements still fit inside the 255 splat
# mask, so the OR folds to the splat(255) build-vector.
name:            vector_const_valid_not_splat
tracksRegLiveness: true
body:             |
  bb.0:
    ; CHECK-LABEL: name: vector_const_valid_not_splat
    ; CHECK: %mask:_(s16) = G_CONSTANT i16 255
    ; CHECK-NEXT: %c2:_(<2 x s16>) = G_BUILD_VECTOR %mask(s16), %mask(s16)
    ; CHECK-NEXT: $vgpr0 = COPY %c2(<2 x s16>)
    ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
    %fifteen:_(s16) = G_CONSTANT i16 15
    %sixteen:_(s16) = G_CONSTANT i16 16
    %mask:_(s16) = G_CONSTANT i16 255
    %c1:_(<2 x s16>) = G_BUILD_VECTOR %fifteen, %sixteen
    %c2:_(<2 x s16>) = G_BUILD_VECTOR %mask, %mask
    %and:_(<2 x s16>) = G_OR %c1(<2 x s16>), %c2(<2 x s16>)
    $vgpr0 = COPY %and(<2 x s16>)
    SI_RETURN_TO_EPILOG implicit $vgpr0
...
---
# Negative test: one LHS element (257) has a bit outside the 255 mask, so
# the combine must not fire — the G_OR is expected to remain.
name:            vector_dont_combine_const_too_wide
tracksRegLiveness: true
body:             |
  bb.0:
    ; CHECK-LABEL: name: vector_dont_combine_const_too_wide
    ; CHECK: %fifteen:_(s16) = G_CONSTANT i16 15
    ; CHECK-NEXT: %too_wide:_(s16) = G_CONSTANT i16 257
    ; CHECK-NEXT: %mask:_(s16) = G_CONSTANT i16 255
    ; CHECK-NEXT: %c1:_(<2 x s16>) = G_BUILD_VECTOR %fifteen(s16), %too_wide(s16)
    ; CHECK-NEXT: %c2:_(<2 x s16>) = G_BUILD_VECTOR %mask(s16), %mask(s16)
    ; CHECK-NEXT: %and:_(<2 x s16>) = G_OR %c1, %c2
    ; CHECK-NEXT: $vgpr0 = COPY %and(<2 x s16>)
    ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
    %fifteen:_(s16) = G_CONSTANT i16 15
    %too_wide:_(s16) = G_CONSTANT i16 257
    %mask:_(s16) = G_CONSTANT i16 255
    %c1:_(<2 x s16>) = G_BUILD_VECTOR %fifteen, %too_wide
    %c2:_(<2 x s16>) = G_BUILD_VECTOR %mask, %mask
    %and:_(<2 x s16>) = G_OR %c1(<2 x s16>), %c2(<2 x s16>)
    $vgpr0 = COPY %and(<2 x s16>)
    SI_RETURN_TO_EPILOG implicit $vgpr0
...
