xref: /llvm-project/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir (revision e4a2d74e0917d481ecda8e8ff0c0af3c683c9441)
1# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2# RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-FAST --allow-unused-prefixes
3# RUN: llc -mtriple=aarch64-unknown-unknown -mattr=+addr-lsl-slow-14 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SLOW --allow-unused-prefixes
4
5--- |
6  define void @ldrxrox_breg_oreg(ptr %addr) { ret void }
7  define void @ldrdrox_breg_oreg(ptr %addr) { ret void }
8  define void @more_than_one_use(ptr %addr) { ret void }
9  define void @ldrhrox_shl(ptr %addr) { ret void }
10  define void @ldrwrox_shl(ptr %addr) { ret void }
11  define void @ldrxrox_shl(ptr %addr) { ret void }
12  define void @ldrdrox_shl(ptr %addr) { ret void }
13  define void @ldrqrox_shl(ptr %addr) { ret void }
14  define void @ldrxrox_mul_rhs(ptr %addr) { ret void }
15  define void @ldrdrox_mul_rhs(ptr %addr) { ret void }
16  define void @ldrxrox_mul_lhs(ptr %addr) { ret void }
17  define void @ldrdrox_mul_lhs(ptr %addr) { ret void }
18  define void @mul_not_pow_2(ptr %addr) { ret void }
19  define void @mul_wrong_pow_2(ptr %addr) { ret void }
20  define void @more_than_one_use_shl_fallback(ptr %addr) { ret void }
21  define void @ldrxrox_more_than_one_mem_use_shl(ptr %addr) { ret void }
22  define void @ldrxrox_more_than_one_use_shl(ptr %addr) { ret void }
23  define void @ldrhrox_more_than_one_mem_use_shl(ptr %addr) { ret void }
24  define void @ldrhrox_more_than_one_use_shl(ptr %addr) { ret void }
25  define void @ldrwrox_more_than_one_use_shl(ptr %addr) { ret void }
26  define void @ldrqrox_more_than_one_use_shl(ptr %addr) { ret void }
27  define void @more_than_one_use_shl_lsl(ptr %addr) { ret void }
28  define void @more_than_one_use_shl_minsize(ptr %addr) #0 { ret void }
29  define void @ldrwrox(ptr %addr) { ret void }
30  define void @ldrsrox(ptr %addr) { ret void }
31  define void @ldrhrox(ptr %addr) { ret void }
32  define void @ldbbrox(ptr %addr) { ret void }
33  define void @ldrqrox(ptr %addr) { ret void }
34  attributes #0 = { optsize }
35...
36
37---
# A G_PTR_ADD (base + register offset) whose only user is a G_LOAD should be
# folded into the register-register addressing mode: LDRXroX base, offset.

38name:            ldrxrox_breg_oreg
39alignment:       4
40legalized:       true
41regBankSelected: true
42tracksRegLiveness: true
43machineFunctionInfo: {}
44body:             |
45  bb.0:
46    liveins: $x0, $x1

48    ; CHECK-LABEL: name: ldrxrox_breg_oreg
49    ; CHECK: liveins: $x0, $x1
50    ; CHECK-NEXT: {{  $}}
51    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
52    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
53    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
54    ; CHECK-NEXT: $x0 = COPY [[LDRXroX]]
55    ; CHECK-NEXT: RET_ReallyLR implicit $x0
56    %0:gpr(p0) = COPY $x0
57    %1:gpr(s64) = COPY $x1
58    %2:gpr(p0) = G_PTR_ADD %0, %1
59    %4:gpr(s64) = G_LOAD %2(p0) :: (load (s64) from %ir.addr)
60    $x0 = COPY %4(s64)
61    RET_ReallyLR implicit $x0
62...
63
64---
# Same base+offset fold as above, but with an FPR destination: the load is
# selected to LDRDroX (fpr64 result).

65name:            ldrdrox_breg_oreg
66alignment:       4
67legalized:       true
68regBankSelected: true
69tracksRegLiveness: true
70machineFunctionInfo: {}
71body:             |
72  bb.0:
73    liveins: $d0, $x1
74    ; CHECK-LABEL: name: ldrdrox_breg_oreg
75    ; CHECK: liveins: $d0, $x1
76    ; CHECK-NEXT: {{  $}}
77    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
78    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
79    ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
80    ; CHECK-NEXT: $d0 = COPY [[LDRDroX]]
81    ; CHECK-NEXT: RET_ReallyLR implicit $d0
82    %0:gpr(p0) = COPY $d0
83    %1:gpr(s64) = COPY $x1
84    %2:gpr(p0) = G_PTR_ADD %0, %1
85    %4:fpr(s64) = G_LOAD %2(p0) :: (load (s64) from %ir.addr)
86    $d0 = COPY %4(s64)
87    RET_ReallyLR implicit $d0
88...
89---
90# This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
91# the G_LOAD
# Instead the address is materialized once with ADDXrr and the load uses the
# immediate-offset form (LDRXui) of that shared address.

93name:            more_than_one_use
94alignment:       4
95legalized:       true
96regBankSelected: true
97tracksRegLiveness: true
98machineFunctionInfo: {}
99body:             |
100  bb.0:
101    liveins: $x0, $x1
102    ; CHECK-LABEL: name: more_than_one_use
103    ; CHECK: liveins: $x0, $x1
104    ; CHECK-NEXT: {{  $}}
105    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
106    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
107    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
108    ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
109    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
110    ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
111    ; CHECK-NEXT: $x0 = COPY [[ADDXrr1]]
112    ; CHECK-NEXT: RET_ReallyLR implicit $x0
113    %0:gpr(p0) = COPY $x0
114    %1:gpr(s64) = COPY $x1
115    %2:gpr(p0) = G_PTR_ADD %0, %1
116    %4:gpr(s64) = G_LOAD %2(p0) :: (load (s64) from %ir.addr)
117    %5:gpr(s64) = G_PTRTOINT %2
118    %6:gpr(s64) = G_ADD %5, %4
119    $x0 = COPY %6(s64)
120    RET_ReallyLR implicit $x0
121
122...
123---
# Fold a G_SHL by 1 into the scaled-offset operand of LDRHHroX (the trailing
# "0, 1" operands select LSL with the s16 scale applied).

124name:            ldrhrox_shl
125alignment:       4
126legalized:       true
127regBankSelected: true
128tracksRegLiveness: true
129machineFunctionInfo: {}
130body:             |
131  bb.0:
132    liveins: $x0, $x1, $x2
133    liveins: $w1, $x0

135    ; CHECK-LABEL: name: ldrhrox_shl
136    ; CHECK: liveins: $x0, $x1, $x2, $w1, $x0
137    ; CHECK-NEXT: {{  $}}
138    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
139    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
140    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
141    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
142    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
143    ; CHECK-NEXT: [[ANDXri:%[0-9]+]]:gpr64common = ANDXri [[SUBREG_TO_REG]], 4103
144    ; CHECK-NEXT: [[LDRHHroX:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
145    ; CHECK-NEXT: RET_ReallyLR implicit [[LDRHHroX]]
146    %0:gpr(p0) = COPY $x0
147    %1:gpr(s32) = COPY $w1
148    %15:gpr(s64) = G_CONSTANT i64 9
149    %3:gpr(s32) = G_LSHR %1, %15(s64)
150    %4:gpr(s64) = G_ZEXT %3(s32)
151    %5:gpr(s64) = G_CONSTANT i64 255
152    %6:gpr(s64) = G_AND %4, %5
153    %13:gpr(s64) = G_CONSTANT i64 1
154    %8:gpr(s64) = G_SHL %6, %13(s64)
155    %9:gpr(p0) = G_PTR_ADD %0, %8(s64)
156    %12:gpr(s32) = G_LOAD %9(p0) :: (load (s16))
157    RET_ReallyLR implicit %12
158...
159---
# Fold a G_SHL by 2 into the scaled offset of LDRWroX (s32 load).

160name:            ldrwrox_shl
161alignment:       4
162legalized:       true
163regBankSelected: true
164tracksRegLiveness: true
165machineFunctionInfo: {}
166body:             |
167  bb.0:
168    liveins: $x0, $x1, $x2
169    ; CHECK-LABEL: name: ldrwrox_shl
170    ; CHECK: liveins: $x0, $x1, $x2
171    ; CHECK-NEXT: {{  $}}
172    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
173    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
174    ; CHECK-NEXT: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY1]], [[COPY]], 0, 1 :: (load (s32) from %ir.addr)
175    ; CHECK-NEXT: RET_ReallyLR implicit [[LDRWroX]]
176    %0:gpr(s64) = COPY $x0
177    %1:gpr(s64) = G_CONSTANT i64 2
178    %2:gpr(s64) = G_SHL %0, %1(s64)
179    %3:gpr(p0) = COPY $x1
180    %4:gpr(p0) = G_PTR_ADD %3, %2
181    %5:gpr(s32) = G_LOAD %4(p0) :: (load (s32) from %ir.addr)
182    RET_ReallyLR implicit %5
183...
184---
# Fold a G_SHL by 3 into the scaled offset of LDRXroX (s64 load).

185name:            ldrxrox_shl
186alignment:       4
187legalized:       true
188regBankSelected: true
189tracksRegLiveness: true
190machineFunctionInfo: {}
191body:             |
192  bb.0:
193    liveins: $x0, $x1, $x2
194    ; CHECK-LABEL: name: ldrxrox_shl
195    ; CHECK: liveins: $x0, $x1, $x2
196    ; CHECK-NEXT: {{  $}}
197    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
198    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
199    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
200    ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
201    ; CHECK-NEXT: RET_ReallyLR implicit $x2
202    %0:gpr(s64) = COPY $x0
203    %1:gpr(s64) = G_CONSTANT i64 3
204    %2:gpr(s64) = G_SHL %0, %1(s64)
205    %3:gpr(p0) = COPY $x1
206    %4:gpr(p0) = G_PTR_ADD %3, %2
207    %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
208    $x2 = COPY %5(s64)
209    RET_ReallyLR implicit $x2
210
211...
212---
# Same as ldrxrox_shl but with an FPR destination: G_SHL by 3 folds into the
# scaled offset of LDRDroX.

213name:            ldrdrox_shl
214alignment:       4
215legalized:       true
216regBankSelected: true
217tracksRegLiveness: true
218machineFunctionInfo: {}
219body:             |
220  bb.0:
221    liveins: $x0, $x1, $d2
222    ; CHECK-LABEL: name: ldrdrox_shl
223    ; CHECK: liveins: $x0, $x1, $d2
224    ; CHECK-NEXT: {{  $}}
225    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
226    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
227    ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
228    ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
229    ; CHECK-NEXT: RET_ReallyLR implicit $d2
230    %0:gpr(s64) = COPY $x0
231    %1:gpr(s64) = G_CONSTANT i64 3
232    %2:gpr(s64) = G_SHL %0, %1(s64)
233    %3:gpr(p0) = COPY $x1
234    %4:gpr(p0) = G_PTR_ADD %3, %2
235    %5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
236    $d2 = COPY %5(s64)
237    RET_ReallyLR implicit $d2
238
239...
240---
# Fold a G_SHL by 4 into the scaled offset of LDRQroX (s128 load).

241name:            ldrqrox_shl
242alignment:       4
243legalized:       true
244regBankSelected: true
245tracksRegLiveness: true
246machineFunctionInfo: {}
247body:             |
248  bb.0:
249    liveins: $x0, $x1, $d2
250    ; CHECK-LABEL: name: ldrqrox_shl
251    ; CHECK: liveins: $x0, $x1, $d2
252    ; CHECK-NEXT: {{  $}}
253    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
254    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
255    ; CHECK-NEXT: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY1]], [[COPY]], 0, 1 :: (load (s128) from %ir.addr)
256    ; CHECK-NEXT: RET_ReallyLR implicit [[LDRQroX]]
257    %0:gpr(s64) = COPY $x0
258    %1:gpr(s64) = G_CONSTANT i64 4
259    %2:gpr(s64) = G_SHL %0, %1(s64)
260    %3:gpr(p0) = COPY $x1
261    %4:gpr(p0) = G_PTR_ADD %3, %2
262    %5:fpr(s128) = G_LOAD %4(p0) :: (load (s128) from %ir.addr)
263    RET_ReallyLR implicit %5

265...
266---
# A G_MUL by 8 (power of two, constant on the RHS) is treated like a shift by
# 3 and folded into the scaled offset of LDRXroX.

267name:            ldrxrox_mul_rhs
268alignment:       4
269legalized:       true
270regBankSelected: true
271tracksRegLiveness: true
272machineFunctionInfo: {}
273body:             |
274  bb.0:
275    liveins: $x0, $x1, $x2
276    ; CHECK-LABEL: name: ldrxrox_mul_rhs
277    ; CHECK: liveins: $x0, $x1, $x2
278    ; CHECK-NEXT: {{  $}}
279    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
280    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
281    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
282    ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
283    ; CHECK-NEXT: RET_ReallyLR implicit $x2
284    %0:gpr(s64) = COPY $x0
285    %1:gpr(s64) = G_CONSTANT i64 8
286    %2:gpr(s64) = G_MUL %0, %1(s64)
287    %3:gpr(p0) = COPY $x1
288    %4:gpr(p0) = G_PTR_ADD %3, %2
289    %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
290    $x2 = COPY %5(s64)
291    RET_ReallyLR implicit $x2

293...
294---
# Same mul-by-8 fold with an FPR destination: selected to LDRDroX.

295name:            ldrdrox_mul_rhs
296alignment:       4
297legalized:       true
298regBankSelected: true
299tracksRegLiveness: true
300machineFunctionInfo: {}
301body:             |
302  bb.0:
303    liveins: $x0, $x1, $d2
304    ; CHECK-LABEL: name: ldrdrox_mul_rhs
305    ; CHECK: liveins: $x0, $x1, $d2
306    ; CHECK-NEXT: {{  $}}
307    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
308    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
309    ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
310    ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
311    ; CHECK-NEXT: RET_ReallyLR implicit $d2
312    %0:gpr(s64) = COPY $x0
313    %1:gpr(s64) = G_CONSTANT i64 8
314    %2:gpr(s64) = G_MUL %0, %1(s64)
315    %3:gpr(p0) = COPY $x1
316    %4:gpr(p0) = G_PTR_ADD %3, %2
317    %5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
318    $d2 = COPY %5(s64)
319    RET_ReallyLR implicit $d2

321...
322---
# As ldrxrox_mul_rhs, but with the power-of-two constant on the LHS of the
# G_MUL; the fold still applies.

323name:            ldrxrox_mul_lhs
324alignment:       4
325legalized:       true
326regBankSelected: true
327tracksRegLiveness: true
328machineFunctionInfo: {}
329body:             |
330  bb.0:
331    liveins: $x0, $x1, $x2
332    ; CHECK-LABEL: name: ldrxrox_mul_lhs
333    ; CHECK: liveins: $x0, $x1, $x2
334    ; CHECK-NEXT: {{  $}}
335    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
336    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
337    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
338    ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
339    ; CHECK-NEXT: RET_ReallyLR implicit $x2
340    %0:gpr(s64) = COPY $x0
341    %1:gpr(s64) = G_CONSTANT i64 8
342    %2:gpr(s64) = G_MUL %1, %0(s64)
343    %3:gpr(p0) = COPY $x1
344    %4:gpr(p0) = G_PTR_ADD %3, %2
345    %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
346    $x2 = COPY %5(s64)
347    RET_ReallyLR implicit $x2

349...
350---
# LHS-constant mul-by-8 fold with an FPR destination: selected to LDRDroX.

351name:            ldrdrox_mul_lhs
352alignment:       4
353legalized:       true
354regBankSelected: true
355tracksRegLiveness: true
356machineFunctionInfo: {}
357body:             |
358  bb.0:
359    liveins: $x0, $x1, $d2
360    ; CHECK-LABEL: name: ldrdrox_mul_lhs
361    ; CHECK: liveins: $x0, $x1, $d2
362    ; CHECK-NEXT: {{  $}}
363    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
364    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
365    ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
366    ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
367    ; CHECK-NEXT: RET_ReallyLR implicit $d2
368    %0:gpr(s64) = COPY $x0
369    %1:gpr(s64) = G_CONSTANT i64 8
370    %2:gpr(s64) = G_MUL %1, %0(s64)
371    %3:gpr(p0) = COPY $x1
372    %4:gpr(p0) = G_PTR_ADD %3, %2
373    %5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
374    $d2 = COPY %5(s64)
375    RET_ReallyLR implicit $d2

377...
378---
379# Show that we don't get a shifted load from a mul when we don't have a
380# power of 2. (The bit isn't set on the load.)
# The multiply is instead selected to MADDXrrr and the load keeps an unscaled
# register offset (trailing "0, 0").

382name:            mul_not_pow_2
383alignment:       4
384legalized:       true
385regBankSelected: true
386tracksRegLiveness: true
387machineFunctionInfo: {}
388body:             |
389  bb.0:
390    liveins: $x0, $x1, $d2
391    ; CHECK-LABEL: name: mul_not_pow_2
392    ; CHECK: liveins: $x0, $x1, $d2
393    ; CHECK-NEXT: {{  $}}
394    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
395    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7
396    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
397    ; CHECK-NEXT: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
398    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
399    ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
400    ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
401    ; CHECK-NEXT: RET_ReallyLR implicit $d2
402    %0:gpr(s64) = COPY $x0
403    %1:gpr(s64) = G_CONSTANT i64 7
404    %2:gpr(s64) = G_MUL %1, %0(s64)
405    %3:gpr(p0) = COPY $x1
406    %4:gpr(p0) = G_PTR_ADD %3, %2
407    %5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
408    $d2 = COPY %5(s64)
409    RET_ReallyLR implicit $d2

411...
412---
413# Show that we don't get a shifted load from a mul when we don't have
414# the right power of 2. (The bit isn't set on the load.)
# 16 would require a shift of 4, which doesn't match the s64 access size, so
# the multiply stays as MADDXrrr and the offset is unscaled.

416name:            mul_wrong_pow_2
417alignment:       4
418legalized:       true
419regBankSelected: true
420tracksRegLiveness: true
421machineFunctionInfo: {}
422body:             |
423  bb.0:
424    liveins: $x0, $x1, $d2
425    ; CHECK-LABEL: name: mul_wrong_pow_2
426    ; CHECK: liveins: $x0, $x1, $d2
427    ; CHECK-NEXT: {{  $}}
428    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
429    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16
430    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
431    ; CHECK-NEXT: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
432    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
433    ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
434    ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
435    ; CHECK-NEXT: RET_ReallyLR implicit $d2
436    %0:gpr(s64) = COPY $x0
437    %1:gpr(s64) = G_CONSTANT i64 16
438    %2:gpr(s64) = G_MUL %1, %0(s64)
439    %3:gpr(p0) = COPY $x1
440    %4:gpr(p0) = G_PTR_ADD %3, %2
441    %5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
442    $d2 = COPY %5(s64)
443    RET_ReallyLR implicit $d2

445...
446---
447# Show that we can still fall back to the register-register addressing
448# mode when we fail to pull in the shift.
# The shift result is materialized with UBFMXri and the load uses the
# unscaled register-register form (trailing "0, 0").

450name:            more_than_one_use_shl_fallback
451alignment:       4
452legalized:       true
453regBankSelected: true
454tracksRegLiveness: true
455machineFunctionInfo: {}
456body:             |
457  bb.0:
458    liveins: $x0, $x1, $x2
459    ; CHECK-LABEL: name: more_than_one_use_shl_fallback
460    ; CHECK: liveins: $x0, $x1, $x2
461    ; CHECK-NEXT: {{  $}}
462    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
463    ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 62, 61
464    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
465    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load (s64) from %ir.addr)
466    ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 2, 0
467    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
468    ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
469    ; CHECK-NEXT: RET_ReallyLR implicit $x2
470    %0:gpr(s64) = COPY $x0
471    %1:gpr(s64) = G_CONSTANT i64 2
472    %2:gpr(s64) = G_SHL %0, %1(s64)
473    %3:gpr(p0) = COPY $x1
474    %4:gpr(p0) = G_PTR_ADD %3, %2
475    %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
476    %6:gpr(s64) = G_ADD %2, %1
477    %7:gpr(s64) = G_ADD %5, %6
478    $x2 = COPY %7(s64)
479    RET_ReallyLR implicit $x2

481...
482---
# The G_SHL feeds two G_LOADs. Since both users are memory uses, the shift is
# still folded into the scaled offset of both LDRXroX instructions.

483name:            ldrxrox_more_than_one_mem_use_shl
484alignment:       4
485legalized:       true
486regBankSelected: true
487tracksRegLiveness: true
488machineFunctionInfo: {}
489body:             |
490  bb.0:
491    liveins: $x0, $x1, $x2
492    ; CHECK-LABEL: name: ldrxrox_more_than_one_mem_use_shl
493    ; CHECK: liveins: $x0, $x1, $x2
494    ; CHECK-NEXT: {{  $}}
495    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
496    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
497    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
498    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
499    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
500    ; CHECK-NEXT: [[ANDXri:%[0-9]+]]:gpr64common = ANDXri [[SUBREG_TO_REG]], 4103
501    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s64))
502    ; CHECK-NEXT: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s64))
503    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
504    ; CHECK-NEXT: RET_ReallyLR implicit [[ADDXrr]]
505    %0:gpr(p0) = COPY $x0
506    %1:gpr(s32) = COPY $w1
507    %15:gpr(s64) = G_CONSTANT i64 9
508    %3:gpr(s32) = G_LSHR %1, %15(s64)
509    %4:gpr(s64) = G_ZEXT %3(s32)
510    %5:gpr(s64) = G_CONSTANT i64 255
511    %6:gpr(s64) = G_AND %4, %5
512    %13:gpr(s64) = G_CONSTANT i64 3
513    %8:gpr(s64) = G_SHL %6, %13(s64)
514    %9:gpr(p0) = G_PTR_ADD %0, %8(s64)
515    %12:gpr(s64) = G_LOAD %9(p0) :: (load (s64))
516    %17:gpr(s64) = G_LOAD %9(p0) :: (load (s64))
517    %18:gpr(s64) = G_ADD %12, %17
518    RET_ReallyLR implicit %18

520...
521---
522# Show that when the GEP is used both inside and outside a memory op, we only fold the memory op.
# The load keeps the scaled LDRXroX form, while the non-memory use recomputes
# the address separately (UBFMXri + ADDXrr).

524name:            ldrxrox_more_than_one_use_shl
525alignment:       4
526legalized:       true
527regBankSelected: true
528tracksRegLiveness: true
529machineFunctionInfo: {}
530body:             |
531  bb.0:
532    liveins: $x0, $x1, $x2
533    ; CHECK-LABEL: name: ldrxrox_more_than_one_use_shl
534    ; CHECK: liveins: $x0, $x1, $x2
535    ; CHECK-NEXT: {{  $}}
536    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
537    ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
538    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
539    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
540    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[UBFMXri]]
541    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
542    ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
543    ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
544    ; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrr]], [[ADDXrr1]]
545    ; CHECK-NEXT: $x2 = COPY [[ADDXrr2]]
546    ; CHECK-NEXT: RET_ReallyLR implicit $x2
547    %0:gpr(s64) = COPY $x0
548    %1:gpr(s64) = G_CONSTANT i64 3
549    %2:gpr(s64) = G_SHL %0, %1(s64)
550    %3:gpr(p0) = COPY $x1
551    %4:gpr(p0) = G_PTR_ADD %3, %2
552    %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
553    %6:gpr(s64) = G_ADD %2, %1
554    %7:gpr(s64) = G_ADD %5, %6
555    %8:gpr(s64) = G_PTRTOINT %4
556    %9:gpr(s64) = G_ADD %8, %7
557    $x2 = COPY %9(s64)
558    RET_ReallyLR implicit $x2

560...
561---
562# Fold SHL into LSL for mem ops. Do not fold if the target has LSLSLOW14.
# With +addr-lsl-slow-14 (CHECK-SLOW) the offset is instead folded into an
# ADDXrx and both loads use the unsigned-immediate form LDRHHui.
563name:            ldrhrox_more_than_one_mem_use_shl
564alignment:       4
565legalized:       true
566regBankSelected: true
567tracksRegLiveness: true
568machineFunctionInfo: {}
569body:             |
570  bb.0:
571    liveins: $x0, $x1, $x2
572    liveins: $w1, $x0

574    ; CHECK-FAST-LABEL: name: ldrhrox_more_than_one_mem_use_shl
575    ; CHECK-FAST: liveins: $x0, $x1, $x2, $w1, $x0
576    ; CHECK-FAST-NEXT: {{  $}}
577    ; CHECK-FAST-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
578    ; CHECK-FAST-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
579    ; CHECK-FAST-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
580    ; CHECK-FAST-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
581    ; CHECK-FAST-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
582    ; CHECK-FAST-NEXT: [[ANDXri:%[0-9]+]]:gpr64common = ANDXri [[SUBREG_TO_REG]], 4103
583    ; CHECK-FAST-NEXT: [[LDRHHroX:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
584    ; CHECK-FAST-NEXT: [[LDRHHroX1:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
585    ; CHECK-FAST-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHroX]], [[LDRHHroX1]]
586    ; CHECK-FAST-NEXT: RET_ReallyLR implicit [[ADDWrr]]
587    ;
588    ; CHECK-SLOW-LABEL: name: ldrhrox_more_than_one_mem_use_shl
589    ; CHECK-SLOW: liveins: $x0, $x1, $x2, $w1, $x0
590    ; CHECK-SLOW-NEXT: {{  $}}
591    ; CHECK-SLOW-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
592    ; CHECK-SLOW-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
593    ; CHECK-SLOW-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
594    ; CHECK-SLOW-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
595    ; CHECK-SLOW-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
596    ; CHECK-SLOW-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]].sub_32
597    ; CHECK-SLOW-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]]
598    ; CHECK-SLOW-NEXT: [[COPY4:%[0-9]+]]:gpr64common = COPY [[COPY]]
599    ; CHECK-SLOW-NEXT: [[ADDXrx:%[0-9]+]]:gpr64sp = ADDXrx [[COPY4]], [[COPY3]], 1
600    ; CHECK-SLOW-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[ADDXrx]], 0 :: (load (s16))
601    ; CHECK-SLOW-NEXT: [[LDRHHui1:%[0-9]+]]:gpr32 = LDRHHui [[ADDXrx]], 0 :: (load (s16))
602    ; CHECK-SLOW-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHui]], [[LDRHHui1]]
603    ; CHECK-SLOW-NEXT: RET_ReallyLR implicit [[ADDWrr]]
604    %0:gpr(p0) = COPY $x0
605    %1:gpr(s32) = COPY $w1
606    %15:gpr(s64) = G_CONSTANT i64 9
607    %3:gpr(s32) = G_LSHR %1, %15(s64)
608    %4:gpr(s64) = G_ZEXT %3(s32)
609    %5:gpr(s64) = G_CONSTANT i64 255
610    %6:gpr(s64) = G_AND %4, %5
611    %13:gpr(s64) = G_CONSTANT i64 1
612    %8:gpr(s64) = G_SHL %6, %13(s64)
613    %9:gpr(p0) = G_PTR_ADD %0, %8(s64)
614    %12:gpr(s32) = G_LOAD %9(p0) :: (load (s16))
615    %17:gpr(s32) = G_LOAD %9(p0) :: (load (s16))
616    %18:gpr(s32) = G_ADD %12, %17
617    RET_ReallyLR implicit %18
618...
619---
620# Fold SHL into LSL for memory ops. Do not fold if the target has LSLSLOW14.
# Same input MIR as the previous test; both users of the G_PTR_ADD are loads,
# so CHECK-FAST folds into LDRHHroX while CHECK-SLOW uses ADDXrx + LDRHHui.
621name:            ldrhrox_more_than_one_use_shl
622alignment:       4
623legalized:       true
624regBankSelected: true
625tracksRegLiveness: true
626machineFunctionInfo: {}
627body:             |
628  bb.0:
629    liveins: $x0, $x1, $x2
630    liveins: $w1, $x0

632    ; CHECK-FAST-LABEL: name: ldrhrox_more_than_one_use_shl
633    ; CHECK-FAST: liveins: $x0, $x1, $x2, $w1, $x0
634    ; CHECK-FAST-NEXT: {{  $}}
635    ; CHECK-FAST-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
636    ; CHECK-FAST-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
637    ; CHECK-FAST-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
638    ; CHECK-FAST-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
639    ; CHECK-FAST-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
640    ; CHECK-FAST-NEXT: [[ANDXri:%[0-9]+]]:gpr64common = ANDXri [[SUBREG_TO_REG]], 4103
641    ; CHECK-FAST-NEXT: [[LDRHHroX:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
642    ; CHECK-FAST-NEXT: [[LDRHHroX1:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
643    ; CHECK-FAST-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHroX]], [[LDRHHroX1]]
644    ; CHECK-FAST-NEXT: RET_ReallyLR implicit [[ADDWrr]]
645    ;
646    ; CHECK-SLOW-LABEL: name: ldrhrox_more_than_one_use_shl
647    ; CHECK-SLOW: liveins: $x0, $x1, $x2, $w1, $x0
648    ; CHECK-SLOW-NEXT: {{  $}}
649    ; CHECK-SLOW-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
650    ; CHECK-SLOW-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
651    ; CHECK-SLOW-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
652    ; CHECK-SLOW-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
653    ; CHECK-SLOW-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
654    ; CHECK-SLOW-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]].sub_32
655    ; CHECK-SLOW-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]]
656    ; CHECK-SLOW-NEXT: [[COPY4:%[0-9]+]]:gpr64common = COPY [[COPY]]
657    ; CHECK-SLOW-NEXT: [[ADDXrx:%[0-9]+]]:gpr64sp = ADDXrx [[COPY4]], [[COPY3]], 1
658    ; CHECK-SLOW-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[ADDXrx]], 0 :: (load (s16))
659    ; CHECK-SLOW-NEXT: [[LDRHHui1:%[0-9]+]]:gpr32 = LDRHHui [[ADDXrx]], 0 :: (load (s16))
660    ; CHECK-SLOW-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHui]], [[LDRHHui1]]
661    ; CHECK-SLOW-NEXT: RET_ReallyLR implicit [[ADDWrr]]
662    %0:gpr(p0) = COPY $x0
663    %1:gpr(s32) = COPY $w1
664    %15:gpr(s64) = G_CONSTANT i64 9
665    %3:gpr(s32) = G_LSHR %1, %15(s64)
666    %4:gpr(s64) = G_ZEXT %3(s32)
667    %5:gpr(s64) = G_CONSTANT i64 255
668    %6:gpr(s64) = G_AND %4, %5
669    %13:gpr(s64) = G_CONSTANT i64 1
670    %8:gpr(s64) = G_SHL %6, %13(s64)
671    %9:gpr(p0) = G_PTR_ADD %0, %8(s64)
672    %12:gpr(s32) = G_LOAD %9(p0) :: (load (s16))
673    %17:gpr(s32) = G_LOAD %9(p0) :: (load (s16))
674    %18:gpr(s32) = G_ADD %12, %17
675    RET_ReallyLR implicit %18
676...
677---
678# Fold SHL into LSL for memory ops.
# The load folds the shift (scaled LDRWroX); the non-memory uses of the shift
# and address are rematerialized with UBFMXri and ADDXrr.
679name:            ldrwrox_more_than_one_use_shl
680alignment:       4
681legalized:       true
682regBankSelected: true
683tracksRegLiveness: true
684machineFunctionInfo: {}
685body:             |
686  bb.0:
687    liveins: $x0, $x1, $x2
688    ; CHECK-LABEL: name: ldrwrox_more_than_one_use_shl
689    ; CHECK: liveins: $x0, $x1, $x2
690    ; CHECK-NEXT: {{  $}}
691    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
692    ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 62, 61
693    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
694    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
695    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[UBFMXri]]
696    ; CHECK-NEXT: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY1]], [[COPY]], 0, 1 :: (load (s32) from %ir.addr)
697    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[LDRWroX]], 0
698    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
699    ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 2, 0
700    ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[SUBREG_TO_REG]], [[ADDXri]]
701    ; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrr]], [[ADDXrr1]]
702    ; CHECK-NEXT: $x2 = COPY [[ADDXrr2]]
703    ; CHECK-NEXT: RET_ReallyLR implicit $x2
704    %0:gpr(s64) = COPY $x0
705    %1:gpr(s64) = G_CONSTANT i64 2
706    %2:gpr(s64) = G_SHL %0, %1(s64)
707    %3:gpr(p0) = COPY $x1
708    %4:gpr(p0) = G_PTR_ADD %3, %2
709    %20:gpr(s32) = G_LOAD %4(p0) :: (load (s32) from %ir.addr)
710    %5:gpr(s64) = G_ZEXT %20
711    %6:gpr(s64) = G_ADD %2, %1
712    %7:gpr(s64) = G_ADD %5, %6
713    %8:gpr(s64) = G_PTRTOINT %4
714    %9:gpr(s64) = G_ADD %8, %7
715    $x2 = COPY %9(s64)
716    RET_ReallyLR implicit $x2
717...
718---
719# Fold SHL into LSL for memory ops. Do not fold if the target has LSLSLOW14.
# With +addr-lsl-slow-14 (CHECK-SLOW) the s128 load does not use the scaled
# register form: the address is computed with ADDXrr and loaded with LDRQui.
720name:            ldrqrox_more_than_one_use_shl
721alignment:       4
722legalized:       true
723regBankSelected: true
724tracksRegLiveness: true
725machineFunctionInfo: {}
726body:             |
727  bb.0:
728    liveins: $x0, $x1, $x2
729    ; CHECK-FAST-LABEL: name: ldrqrox_more_than_one_use_shl
730    ; CHECK-FAST: liveins: $x0, $x1, $x2
731    ; CHECK-FAST-NEXT: {{  $}}
732    ; CHECK-FAST-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
733    ; CHECK-FAST-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 60, 59
734    ; CHECK-FAST-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
735    ; CHECK-FAST-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
736    ; CHECK-FAST-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[UBFMXri]]
737    ; CHECK-FAST-NEXT: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY1]], [[COPY]], 0, 1 :: (load (s128) from %ir.addr)
738    ; CHECK-FAST-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 4, 0
739    ; CHECK-FAST-NEXT: [[COPY3:%[0-9]+]]:fpr64 = COPY [[LDRQroX]].dsub
740    ; CHECK-FAST-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY [[COPY3]]
741    ; CHECK-FAST-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY4]], [[ADDXri]]
742    ; CHECK-FAST-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrr]], [[ADDXrr1]]
743    ; CHECK-FAST-NEXT: RET_ReallyLR implicit [[ADDXrr2]]
744    ;
745    ; CHECK-SLOW-LABEL: name: ldrqrox_more_than_one_use_shl
746    ; CHECK-SLOW: liveins: $x0, $x1, $x2
747    ; CHECK-SLOW-NEXT: {{  $}}
748    ; CHECK-SLOW-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
749    ; CHECK-SLOW-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 60, 59
750    ; CHECK-SLOW-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
751    ; CHECK-SLOW-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
752    ; CHECK-SLOW-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[ADDXrr]], 0 :: (load (s128) from %ir.addr)
753    ; CHECK-SLOW-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 4, 0
754    ; CHECK-SLOW-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY [[LDRQui]].dsub
755    ; CHECK-SLOW-NEXT: [[COPY3:%[0-9]+]]:gpr64 = COPY [[COPY2]]
756    ; CHECK-SLOW-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY3]], [[ADDXri]]
757    ; CHECK-SLOW-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
758    ; CHECK-SLOW-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY4]], [[ADDXrr1]]
759    ; CHECK-SLOW-NEXT: RET_ReallyLR implicit [[ADDXrr2]]
760    %0:gpr(s64) = COPY $x0
761    %1:gpr(s64) = G_CONSTANT i64 4
762    %2:gpr(s64) = G_SHL %0, %1(s64)
763    %3:gpr(p0) = COPY $x1
764    %4:gpr(p0) = G_PTR_ADD %3, %2
765    %20:fpr(s128) = G_LOAD %4(p0) :: (load (s128) from %ir.addr)
766    %6:gpr(s64) = G_ADD %2, %1
767    %200:fpr(s64) = G_TRUNC %20
768    %2000:gpr(s64) = COPY %200
769    %7:gpr(s64) = G_ADD %2000, %6
770    %8:gpr(s64) = G_PTRTOINT %4
771    %9:gpr(s64) = G_ADD %8, %7
772    RET_ReallyLR implicit %9
773...
774---
775# Show that when we have a fastpath for shift-left, we perform the folding
776# if it has more than one use.
777
# Both G_LOADs consume the same G_PTR_ADD; each one independently folds
# the LSL-3 offset into its own LDRXroX (shift operand = 1), so the
# shared G_SHL is duplicated into the addressing mode rather than
# computed once.
name:            more_than_one_use_shl_lsl
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1, $x2
    ; CHECK-LABEL: name: more_than_one_use_shl_lsl
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
    ; CHECK-NEXT: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
    ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
    ; CHECK-NEXT: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
    %6:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
    %7:gpr(s64) = G_ADD %5, %6
    $x2 = COPY %7(s64)
    RET_ReallyLR implicit $x2

...
809---
810# Show that when we're optimizing for size, we'll do the folding no matter
811# what.
812
# @more_than_one_use_shl_minsize carries attribute group #0 in the IR
# section (optimizing for size — see the file-level comment above this
# test), so the LSL-3 offset is folded into LDRXroX even though the
# shifted value has additional non-memory uses.
name:            more_than_one_use_shl_minsize
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1, $x2
    ; CHECK-LABEL: name: more_than_one_use_shl_minsize
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
    ; CHECK-NEXT: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
    ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
    ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
    ; CHECK-NEXT: $x2 = COPY [[ADDXrr1]]
    ; CHECK-NEXT: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
    %6:gpr(s64) = G_ADD %2, %1
    %7:gpr(s64) = G_ADD %5, %6
    %8:gpr(s64) = G_PTRTOINT %4
    %9:gpr(s64) = G_ADD %8, %7
    $x2 = COPY %9(s64)
    RET_ReallyLR implicit $x2
...
849---
# Plain (unshifted) register+register addressing: a G_PTR_ADD of base
# and index feeding a 32-bit G_LOAD selects to LDRWroX with extend and
# shift operands both 0.
name:            ldrwrox
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1
    ; CHECK-LABEL: name: ldrwrox
    ; CHECK: liveins: $x0, $x1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK-NEXT: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
    ; CHECK-NEXT: $w2 = COPY [[LDRWroX]]
    ; CHECK-NEXT: RET_ReallyLR implicit $w2
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_PTR_ADD %0, %1
    %4:gpr(s32) = G_LOAD %2(p0) :: (load (s32) from %ir.addr)
    $w2 = COPY %4(s32)
    RET_ReallyLR implicit $w2
...
874---
# Unshifted register+register addressing for a 32-bit FPR load: the
# G_PTR_ADD folds into LDRSroX (extend/shift operands 0).
# Fixed: the return previously read `implicit $h2` (a copy-paste of the
# ldrhrox test below) while the loaded value is copied to $s2; the RET's
# implicit operand now names the register actually defined. The implicit
# operand passes through instruction selection unchanged, so the CHECK
# line is updated to match.
name:            ldrsrox
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $d0, $x1
    ; CHECK-LABEL: name: ldrsrox
    ; CHECK: liveins: $d0, $x1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK-NEXT: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
    ; CHECK-NEXT: $s2 = COPY [[LDRSroX]]
    ; CHECK-NEXT: RET_ReallyLR implicit $s2
    %0:gpr(p0) = COPY $d0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_PTR_ADD %0, %1
    %4:fpr(s32) = G_LOAD %2(p0) :: (load (s32) from %ir.addr)
    $s2 = COPY %4(s32)
    RET_ReallyLR implicit $s2
...
899---
# Unshifted register+register addressing for a 16-bit FPR load: the
# G_PTR_ADD folds into LDRHroX (extend/shift operands 0).
name:            ldrhrox
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1
    ; CHECK-LABEL: name: ldrhrox
    ; CHECK: liveins: $x0, $x1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK-NEXT: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load (s16) from %ir.addr)
    ; CHECK-NEXT: $h2 = COPY [[LDRHroX]]
    ; CHECK-NEXT: RET_ReallyLR implicit $h2
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_PTR_ADD %0, %1
    %4:fpr(s16) = G_LOAD %2(p0) :: (load (s16) from %ir.addr)
    $h2 = COPY %4(s16)
    RET_ReallyLR implicit $h2
...
924---
# Unshifted register+register addressing for a byte load widened into a
# 32-bit GPR: an s8 G_LOAD into gpr(s32) selects to LDRBBroX
# (extend/shift operands 0).
name:            ldbbrox
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1
    ; CHECK-LABEL: name: ldbbrox
    ; CHECK: liveins: $x0, $x1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK-NEXT: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load (s8) from %ir.addr)
    ; CHECK-NEXT: $w2 = COPY [[LDRBBroX]]
    ; CHECK-NEXT: RET_ReallyLR implicit $w2
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_PTR_ADD %0, %1
    %4:gpr(s32) = G_LOAD %2(p0) :: (load (s8) from %ir.addr)
    $w2 = COPY %4(s32)
    RET_ReallyLR implicit $w2
...
949---
950name:            ldrqrox
951alignment:       4
952legalized:       true
953regBankSelected: true
954tracksRegLiveness: true
955machineFunctionInfo: {}
956body:             |
957  bb.0:
958    liveins: $d0, $x1
959    ; CHECK-LABEL: name: ldrqrox
960    ; CHECK: liveins: $d0, $x1
961    ; CHECK-NEXT: {{  $}}
962    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
963    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
964    ; CHECK-NEXT: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load (<2 x s64>) from %ir.addr)
965    ; CHECK-NEXT: $q0 = COPY [[LDRQroX]]
966    ; CHECK-NEXT: RET_ReallyLR implicit $q0
967    %0:gpr(p0) = COPY $d0
968    %1:gpr(s64) = COPY $x1
969    %2:gpr(p0) = G_PTR_ADD %0, %1
970    %4:fpr(<2 x s64>) = G_LOAD %2(p0) :: (load (<2 x s64>) from %ir.addr)
971    $q0 = COPY %4(<2 x s64>)
972    RET_ReallyLR implicit $q0
973