# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
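# These tests cover selection of G_ZEXT from s32 to s64. When the 32-bit
# source instruction satisfies isDef32, the zero extension is redundant and no
# UBFMXri is needed; the fold only applies to 32-to-64-bit zero extends, and
# copies, bitcasts, truncs, and phis are excluded.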
...
---
name:            fold
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; This should not have a UBFMXri, since ADDWrr implicitly gives us the
    ; zext.
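    ; (On AArch64, writing a W register zeroes the upper 32 bits of the
    ; corresponding X register, so the ADDWrr result is already zero extended.)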

    ; CHECK-LABEL: name: fold
    ; CHECK: liveins: $w0, $w1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY1]], [[COPY]]
    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[ADDWrr]], 0
    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
    ; CHECK-NEXT: $x0 = COPY [[SUBREG_TO_REG]]
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_ADD %1, %0
    %3:gpr(s64) = G_ZEXT %2(s32)
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_s16
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; We should have a UBFMXri here, because we only do this fold for zero
    ; extends from 32 bits to 64 bits.
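    ; (UBFMXri src, 0, 15 extracts bits 0-15 and zeroes the rest, i.e. it is
    ; the explicit zero extension of the s16 value.)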

    ; CHECK-LABEL: name: dont_fold_s16
    ; CHECK: liveins: $w0, $w1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[DEF]], %subreg.sub_32
    ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 15
    ; CHECK-NEXT: $x0 = COPY [[UBFMXri]]
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %0:gpr(s16) = G_IMPLICIT_DEF
    %3:gpr(s64) = G_ZEXT %0(s16)
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_copy
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0

    ; We should have an ORRWrs here, because isDef32 disallows copies.
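    ; (ORRWrs $wzr, %copy, 0 is a plain 32-bit register move; the W-register
    ; write clears the upper 32 bits, and SUBREG_TO_REG then forms the 64-bit
    ; value.)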

    ; CHECK-LABEL: name: dont_fold_copy
    ; CHECK: liveins: $w0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy:gpr32 = COPY $w0
    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %copy, 0
    ; CHECK-NEXT: %zext:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
    ; CHECK-NEXT: $x0 = COPY %zext
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %copy:gpr(s32) = COPY $w0
    %zext:gpr(s64) = G_ZEXT %copy(s32)
    $x0 = COPY %zext(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_bitcast
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0

    ; We should have an ORRWrs here, because isDef32 disallows bitcasts.

    ; CHECK-LABEL: name: dont_fold_bitcast
    ; CHECK: liveins: $w0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy:gpr32all = COPY $w0
    ; CHECK-NEXT: %bitcast1:gpr32 = COPY %copy
    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %bitcast1, 0
    ; CHECK-NEXT: %zext:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
    ; CHECK-NEXT: $x0 = COPY %zext
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %copy:gpr(s32) = COPY $w0
    %bitcast0:gpr(<4 x s8>) = G_BITCAST %copy(s32)
    %bitcast1:gpr(s32) = G_BITCAST %bitcast0
    %zext:gpr(s64) = G_ZEXT %bitcast1(s32)
    $x0 = COPY %zext(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_trunc
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0

    ; We should have an ORRWrs here, because isDef32 disallows truncs.

    ; CHECK-LABEL: name: dont_fold_trunc
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy:gpr64sp = COPY $x0
    ; CHECK-NEXT: %trunc:gpr32common = COPY %copy.sub_32
    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %trunc, 0
    ; CHECK-NEXT: %zext:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
    ; CHECK-NEXT: $x0 = COPY %zext
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %copy:gpr(s64) = COPY $x0
    %trunc:gpr(s32) = G_TRUNC %copy(s64)
    %zext:gpr(s64) = G_ZEXT %trunc(s32)
    $x0 = COPY %zext(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_phi
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  ; CHECK-LABEL: name: dont_fold_phi
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
  ; CHECK-NEXT:   liveins: $w0, $w1, $w2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %copy1:gpr32all = COPY $w0
  ; CHECK-NEXT:   %copy2:gpr32all = COPY $w1
  ; CHECK-NEXT:   %cond_wide:gpr32 = COPY $w2
  ; CHECK-NEXT:   TBNZW %cond_wide, 0, %bb.1
  ; CHECK-NEXT:   B %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   %phi:gpr32 = PHI %copy1, %bb.0, %copy2, %bb.1
  ; CHECK-NEXT:   [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %phi, 0
  ; CHECK-NEXT:   [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
  ; CHECK-NEXT:   $x0 = COPY [[SUBREG_TO_REG]]
  ; CHECK-NEXT:   RET_ReallyLR implicit $x0
  ; We should have an ORRWrs here, because isDef32 disallows phis.

  bb.0:
    liveins: $w0, $w1, $w2

    %copy1:gpr(s32) = COPY $w0
    %copy2:gpr(s32) = COPY $w1
    %cond_wide:gpr(s32) = COPY $w2
    G_BRCOND %cond_wide, %bb.1
    G_BR %bb.2

  bb.1:

  bb.2:
    %phi:gpr(s32) = G_PHI %copy1(s32), %bb.0, %copy2(s32), %bb.1
    %5:gpr(s64) = G_ZEXT %phi(s32)
    $x0 = COPY %5(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_look_through_copy
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; Make sure we don't walk past the copy.
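    ; Even though the copied value is produced by a G_ADD, we should not look
    ; through the COPY when checking isDef32.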

    ; CHECK-LABEL: name: dont_look_through_copy
    ; CHECK: liveins: $w0, $w1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY1]], [[COPY]]
    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[ADDWrr]], 0
    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
    ; CHECK-NEXT: $x0 = COPY [[SUBREG_TO_REG]]
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_ADD %1, %0
    %3:gpr(s32) = COPY %2(s32)
    %4:gpr(s64) = G_ZEXT %3(s32)
    $x0 = COPY %4(s64)
    RET_ReallyLR implicit $x0