; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64I
; RUN: llc -mtriple=riscv64 -mattr=+m,+zba -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBANOZBB
; RUN: llc -mtriple=riscv64 -mattr=+m,+zba,+zbb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBAZBB,RV64ZBAZBBNOZBS
; RUN: llc -mtriple=riscv64 -mattr=+m,+zba,+zbb,+zbs -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBAZBB,RV64ZBAZBBZBS

define i64 @slliuw(i64 %a) nounwind {
; RV64I-LABEL: slliuw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 31
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: slliuw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 1
; RV64ZBA-NEXT:    ret
  %conv1 = shl i64 %a, 1
  %shl = and i64 %conv1, 8589934590
  ret i64 %shl
}

define i128 @slliuw_2(i32 signext %0, ptr %1) {
; RV64I-LABEL: slliuw_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 28
; RV64I-NEXT:    add a1, a1, a0
; RV64I-NEXT:    ld a0, 0(a1)
; RV64I-NEXT:    ld a1, 8(a1)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: slliuw_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 4
; RV64ZBA-NEXT:    add a1, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a1)
; RV64ZBA-NEXT:    ld a1, 8(a1)
; RV64ZBA-NEXT:    ret
  %3 = zext i32 %0 to i64
  %4 = getelementptr inbounds i128, ptr %1, i64 %3
  %5 = load i128, ptr %4
  ret i128 %5
}

define i64 @adduw(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: adduw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: adduw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    add.uw a0, a1, a0
; RV64ZBA-NEXT:    ret
  %and = and i64 %b, 4294967295
  %add = add i64 %and, %a
  ret i64 %add
}

define signext i8 @adduw_2(i32 signext %0, ptr %1) {
; RV64I-LABEL: adduw_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    lb a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: adduw_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    add.uw a0, a0, a1
; RV64ZBA-NEXT:    lb a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = zext i32 %0 to i64
  %4 = getelementptr inbounds i8, ptr %1, i64 %3
  %5 = load i8, ptr %4
  ret i8 %5
}

define i64 @zextw_i64(i64 %a) nounwind {
; RV64I-LABEL: zextw_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zextw_i64:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    zext.w a0, a0
; RV64ZBA-NEXT:    ret
  %and = and i64 %a, 4294967295
  ret i64 %and
}

; This makes sure targetShrinkDemandedConstant changes the and immediate to
; allow zext.w or slli+srli.
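; The or overwrites bit 0, so bit 0 of the and result is dead; widening the
; mask from 4294967294 (0xFFFFFFFE) to 4294967295 (0xFFFFFFFF) is therefore
; legal and matches the zext.w pattern.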
define i64 @zextw_demandedbits_i64(i64 %0) {
; RV64I-LABEL: zextw_demandedbits_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ori a0, a0, 1
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zextw_demandedbits_i64:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    ori a0, a0, 1
; RV64ZBA-NEXT:    zext.w a0, a0
; RV64ZBA-NEXT:    ret
  %2 = and i64 %0, 4294967294
  %3 = or i64 %2, 1
  ret i64 %3
}

define signext i16 @sh1add(i64 %0, ptr %1) {
; RV64I-LABEL: sh1add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 1
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh1add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = getelementptr inbounds i16, ptr %1, i64 %0
  %4 = load i16, ptr %3
  ret i16 %4
}

define signext i32 @sh2add(i64 %0, ptr %1) {
; RV64I-LABEL: sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 2
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = getelementptr inbounds i32, ptr %1, i64 %0
  %4 = load i32, ptr %3
  ret i32 %4
}

define i64 @sh3add(i64 %0, ptr %1) {
; RV64I-LABEL: sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 3
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = getelementptr inbounds i64, ptr %1, i64 %0
  %4 = load i64, ptr %3
  ret i64 %4
}

define signext i16 @sh1adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh1adduw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 31
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh1adduw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add.uw a0, a0, a1
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = zext i32 %0 to i64
  %4 = getelementptr inbounds i16, ptr %1, i64 %3
  %5 = load i16, ptr %4
  ret i16 %5
}

define i64 @sh1adduw_2(i64 %0, i64 %1) {
; RV64I-LABEL: sh1adduw_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 31
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh1adduw_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %3 = shl i64 %0, 1
  %4 = and i64 %3, 8589934590
  %5 = add i64 %4, %1
  ret i64 %5
}

define i64 @sh1adduw_3(i64 %0, i64 %1) {
; RV64I-LABEL: sh1adduw_3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 31
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh1adduw_3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %3 = shl i64 %0, 1
  %4 = and i64 %3, 8589934590
  %5 = or disjoint i64 %4, %1
  ret i64 %5
}

define signext i32 @sh2adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh2adduw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 30
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2adduw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = zext i32 %0 to i64
  %4 = getelementptr inbounds i32, ptr %1, i64 %3
  %5 = load i32, ptr %4
  ret i32 %5
}

define i64 @sh2adduw_2(i64 %0, i64 %1) {
; RV64I-LABEL: sh2adduw_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 30
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2adduw_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %3 = shl i64 %0, 2
  %4 = and i64 %3, 17179869180
  %5 = add i64 %4, %1
  ret i64 %5
}

define i64 @sh2adduw_3(i64 %0, i64 %1) {
; RV64I-LABEL: sh2adduw_3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 30
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2adduw_3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %3 = shl i64 %0, 2
  %4 = and i64 %3, 17179869180
  %5 = or disjoint i64 %4, %1
  ret i64 %5
}

define i64 @sh3adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh3adduw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 29
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh3adduw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add.uw a0, a0, a1
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = zext i32 %0 to i64
  %4 = getelementptr inbounds i64, ptr %1, i64 %3
  %5 = load i64, ptr %4
  ret i64 %5
}

define i64 @sh3adduw_2(i64 %0, i64 %1) {
; RV64I-LABEL: sh3adduw_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 29
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh3adduw_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %3 = shl i64 %0, 3
  %4 = and i64 %3, 34359738360
  %5 = add i64 %4, %1
  ret i64 %5
}

define i64 @sh3adduw_3(i64 %0, i64 %1) {
; RV64I-LABEL: sh3adduw_3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 29
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh3adduw_3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %3 = shl i64 %0, 3
  %4 = and i64 %3, 34359738360
  %5 = or disjoint i64 %4, %1
  ret i64 %5
}

; Type legalization inserts a sext_inreg after the first add. That add will be
; selected as sh2add which does not sign extend. SimplifyDemandedBits is unable
; to remove the sext_inreg because it has multiple uses. The ashr will use the
; sext_inreg to become sraiw. This leaves the sext_inreg only used by the shl.
; If the shl is selected as sllw, we don't need the sext_inreg.
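; sllw only reads the low 5 bits of its shift-amount operand, so the bits the
; sext_inreg would define are never observed by the shift.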
define i64 @sh2add_extra_sext(i32 %x, i32 %y, i32 %z) {
; RV64I-LABEL: sh2add_extra_sext:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    sllw a1, a2, a0
; RV64I-NEXT:    sraiw a0, a0, 2
; RV64I-NEXT:    mul a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2add_extra_sext:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    sllw a1, a2, a0
; RV64ZBA-NEXT:    sraiw a0, a0, 2
; RV64ZBA-NEXT:    mul a0, a1, a0
; RV64ZBA-NEXT:    ret
  %a = shl i32 %x, 2
  %b = add i32 %a, %y
  %c = shl i32 %z, %b
  %d = ashr i32 %b, 2
  %e = sext i32 %c to i64
  %f = sext i32 %d to i64
  %g = mul i64 %e, %f
  ret i64 %g
}

define i64 @addmul6(i64 %a, i64 %b) {
; RV64I-LABEL: addmul6:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a2, a0, 1
; RV64I-NEXT:    slli a0, a0, 3
; RV64I-NEXT:    sub a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul6:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 6
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @disjointormul6(i64 %a, i64 %b) {
; RV64I-LABEL: disjointormul6:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a2, a0, 1
; RV64I-NEXT:    slli a0, a0, 3
; RV64I-NEXT:    sub a0, a0, a2
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: disjointormul6:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 6
  %d = or disjoint i64 %c, %b
  ret i64 %d
}

define i64 @addmul10(i64 %a, i64 %b) {
; RV64I-LABEL: addmul10:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 10
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul10:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 10
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul12(i64 %a, i64 %b) {
; RV64I-LABEL: addmul12:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a2, a0, 2
; RV64I-NEXT:    slli a0, a0, 4
; RV64I-NEXT:    sub a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul12:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 12
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul18(i64 %a, i64 %b) {
; RV64I-LABEL: addmul18:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 18
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul18:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 18
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul20(i64 %a, i64 %b) {
; RV64I-LABEL: addmul20:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 20
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul20:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 20
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul22(i64 %a, i64 %b) {
; CHECK-LABEL: addmul22:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 22
; CHECK-NEXT:    mul a0, a0, a2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ret
  %c = mul i64 %a, 22
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul24(i64 %a, i64 %b) {
; RV64I-LABEL: addmul24:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a2, a0, 3
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    sub a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul24:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 24
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul36(i64 %a, i64 %b) {
; RV64I-LABEL: addmul36:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 36
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul36:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 36
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul40(i64 %a, i64 %b) {
; RV64I-LABEL: addmul40:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 40
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul40:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 40
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul72(i64 %a, i64 %b) {
; RV64I-LABEL: addmul72:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 72
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul72:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 72
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @mul50(i64 %a) {
; RV64I-LABEL: mul50:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 50
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul50:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 50
  ret i64 %c
}

define i64 @addmul50(i64 %a, i64 %b) {
; RV64I-LABEL: addmul50:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 50
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul50:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 50
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @mul100(i64 %a) {
; RV64I-LABEL: mul100:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 100
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul100:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 2
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 100
  ret i64 %c
}

define i64 @addmul100(i64 %a, i64 %b) {
; RV64I-LABEL: addmul100:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 100
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul100:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 100
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @mul162(i64 %a) {
; RV64I-LABEL: mul162:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 162
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul162:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 162
  ret i64 %c
}

define i64 @addmul162(i64 %a, i64 %b) {
; RV64I-LABEL: addmul162:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 162
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul162:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 162
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @mul180(i64 %a) {
; RV64I-LABEL: mul180:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 180
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul180:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 2
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 180
  ret i64 %c
}

define i64 @addmul180(i64 %a, i64 %b) {
; RV64I-LABEL: addmul180:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 180
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul180:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 180
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @add255mul180(i64 %a) {
; RV64I-LABEL: add255mul180:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 180
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    addi a0, a0, 255
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: add255mul180:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 2
; RV64ZBA-NEXT:    addi a0, a0, 255
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 180
  %d = add i64 %c, 255
  ret i64 %d
}

define i64 @mul200(i64 %a) {
; RV64I-LABEL: mul200:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 200
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul200:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 3
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 200
  ret i64 %c
}

define i64 @addmul200(i64 %a, i64 %b) {
; RV64I-LABEL: addmul200:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 200
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul200:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 200
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul4096(i64 %a, i64 %b) {
; CHECK-LABEL: addmul4096:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 12
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ret
  %c = mul i64 %a, 4096
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul4230(i64 %a, i64 %b) {
; CHECK-LABEL: addmul4230:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a2, 1
; CHECK-NEXT:    addiw a2, a2, 134
; CHECK-NEXT:    mul a0, a0, a2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ret
  %c = mul i64 %a, 4230
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @mul96(i64 %a) {
; RV64I-LABEL: mul96:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 5
; RV64I-NEXT:    slli a0, a0, 7
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul96:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 96
  ret i64 %c
}

define i64 @mul119(i64 %a) {
; RV64I-LABEL: mul119:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 119
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul119:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 7
; RV64ZBA-NEXT:    sub a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 119
  ret i64 %c
}

define i64 @mul123(i64 %a) {
; RV64I-LABEL: mul123:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 123
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul123:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 7
; RV64ZBA-NEXT:    sub a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 123
  ret i64 %c
}

define i64 @mul125(i64 %a) {
; RV64I-LABEL: mul125:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 125
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul125:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a1, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 7
; RV64ZBA-NEXT:    sub a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 125
  ret i64 %c
}

define i64 @mul131(i64 %a) {
; RV64I-LABEL: mul131:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 131
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul131:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a1, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 7
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 131
  ret i64 %c
}

define i64 @mul133(i64 %a) {
; RV64I-LABEL: mul133:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 133
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul133:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 7
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 133
  ret i64 %c
}

define i64 @mul137(i64 %a) {
; RV64I-LABEL: mul137:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 137
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul137:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 7
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 137
  ret i64 %c
}

define i64 @mul160(i64 %a) {
; RV64I-LABEL: mul160:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 160
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul160:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 160
  ret i64 %c
}

define i64 @mul288(i64 %a) {
; RV64I-LABEL: mul288:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 288
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul288:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 288
  ret i64 %c
}

define i64 @zext_mul68(i32 signext %a) {
; RV64I-LABEL: zext_mul68:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 17
; RV64I-NEXT:    slli a1, a1, 34
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    mulhu a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul68:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a1, a0, 6
; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 68
  ret i64 %c
}

define i64 @zext_mul96(i32 signext %a) {
; RV64I-LABEL: zext_mul96:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a1, a0, 27
; RV64I-NEXT:    srli a0, a0, 25
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul96:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 5
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 96
  ret i64 %c
}

define i64 @zext_mul160(i32 signext %a) {
; RV64I-LABEL: zext_mul160:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    slli a1, a1, 37
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    mulhu a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul160:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 5
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 160
  ret i64 %c
}

define i64 @zext_mul288(i32 signext %a) {
; RV64I-LABEL: zext_mul288:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 9
; RV64I-NEXT:    slli a1, a1, 37
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    mulhu a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul288:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 5
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 288
  ret i64 %c
}

; We can't use slli.uw because the shift amount is more than 31.
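; 12884901888 = 3 << 32, so the product is formed as (a + 2*a) << 32, with the
; zero-extend made redundant by the final slli by 32.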
define i64 @zext_mul12884901888(i32 signext %a) {
; RV64I-LABEL: zext_mul12884901888:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 32
; RV64I-NEXT:    slli a0, a0, 34
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul12884901888:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 32
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 12884901888
  ret i64 %c
}

; We can't use slli.uw because the shift amount is more than 31.
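; 21474836480 = 5 << 32; as above, the zero-extend is folded into the final
; slli by 32.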
define i64 @zext_mul21474836480(i32 signext %a) {
; RV64I-LABEL: zext_mul21474836480:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul21474836480:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 32
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 21474836480
  ret i64 %c
}

; We can't use slli.uw because the shift amount is more than 31.
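; 38654705664 = 9 << 32; same pattern with sh3add.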
define i64 @zext_mul38654705664(i32 signext %a) {
; RV64I-LABEL: zext_mul38654705664:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 9
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul38654705664:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 32
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 38654705664
  ret i64 %c
}

define i64 @sh1add_imm(i64 %0) {
; CHECK-LABEL: sh1add_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, 5
; CHECK-NEXT:    ret
  %a = shl i64 %0, 1
  %b = add i64 %a, 5
  ret i64 %b
}

define i64 @sh2add_imm(i64 %0) {
; CHECK-LABEL: sh2add_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -6
; CHECK-NEXT:    ret
  %a = shl i64 %0, 2
  %b = add i64 %a, -6
  ret i64 %b
}

define i64 @sh3add_imm(i64 %0) {
; CHECK-LABEL: sh3add_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, 7
; CHECK-NEXT:    ret
  %a = shl i64 %0, 3
  %b = add i64 %a, 7
  ret i64 %b
}

define i64 @sh1adduw_imm(i32 signext %0) {
; RV64I-LABEL: sh1adduw_imm:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 31
; RV64I-NEXT:    addi a0, a0, 11
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh1adduw_imm:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 1
; RV64ZBA-NEXT:    addi a0, a0, 11
; RV64ZBA-NEXT:    ret
  %a = zext i32 %0 to i64
  %b = shl i64 %a, 1
  %c = add i64 %b, 11
  ret i64 %c
}

define i64 @sh2adduw_imm(i32 signext %0) {
; RV64I-LABEL: sh2adduw_imm:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 30
; RV64I-NEXT:    addi a0, a0, -12
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2adduw_imm:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 2
; RV64ZBA-NEXT:    addi a0, a0, -12
; RV64ZBA-NEXT:    ret
  %a = zext i32 %0 to i64
  %b = shl i64 %a, 2
  %c = add i64 %b, -12
  ret i64 %c
}

define i64 @sh3adduw_imm(i32 signext %0) {
; RV64I-LABEL: sh3adduw_imm:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 29
; RV64I-NEXT:    addi a0, a0, 13
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh3adduw_imm:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 3
; RV64ZBA-NEXT:    addi a0, a0, 13
; RV64ZBA-NEXT:    ret
  %a = zext i32 %0 to i64
  %b = shl i64 %a, 3
  %c = add i64 %b, 13
  ret i64 %c
}

define i64 @adduw_imm(i32 signext %0) nounwind {
; RV64I-LABEL: adduw_imm:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    addi a0, a0, 5
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: adduw_imm:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    zext.w a0, a0
; RV64ZBA-NEXT:    addi a0, a0, 5
; RV64ZBA-NEXT:    ret
  %a = zext i32 %0 to i64
  %b = add i64 %a, 5
  ret i64 %b
}

define i64 @mul258(i64 %a) {
; RV64I-LABEL: mul258:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 258
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul258:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a0, 8
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 258
  ret i64 %c
}

define i64 @mul260(i64 %a) {
; RV64I-LABEL: mul260:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 260
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul260:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a0, 8
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 260
  ret i64 %c
}

define i64 @mul264(i64 %a) {
; RV64I-LABEL: mul264:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 264
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul264:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a0, 8
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 264
  ret i64 %c
}

define i64 @imm_zextw() nounwind {
; RV64I-LABEL: imm_zextw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    addi a0, a0, -2
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: imm_zextw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    li a0, -2
; RV64ZBA-NEXT:    zext.w a0, a0
; RV64ZBA-NEXT:    ret
  ret i64 4294967294 ; -2 in 32 bits.
}

define i64 @mul11(i64 %a) {
; RV64I-LABEL: mul11:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 11
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul11:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 11
  ret i64 %c
}

define i64 @mul19(i64 %a) {
; RV64I-LABEL: mul19:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 19
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul19:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 19
  ret i64 %c
}

define i64 @mul13(i64 %a) {
; RV64I-LABEL: mul13:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 13
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul13:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a1, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 13
  ret i64 %c
}

define i64 @mul21(i64 %a) {
; RV64I-LABEL: mul21:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 21
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul21:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 21
  ret i64 %c
}

define i64 @mul37(i64 %a) {
; RV64I-LABEL: mul37:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 37
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul37:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 37
  ret i64 %c
}

define i64 @mul25(i64 %a) {
; RV64I-LABEL: mul25:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 25
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul25:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 25
  ret i64 %c
}

define i64 @mul41(i64 %a) {
; RV64I-LABEL: mul41:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 41
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul41:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 41
  ret i64 %c
}

define i64 @mul73(i64 %a) {
; RV64I-LABEL: mul73:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 73
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul73:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 73
  ret i64 %c
}

define i64 @mul27(i64 %a) {
; RV64I-LABEL: mul27:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 27
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul27:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 27
  ret i64 %c
}

define i64 @mul45(i64 %a) {
; RV64I-LABEL: mul45:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 45
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul45:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 45
  ret i64 %c
}

define i64 @mul81(i64 %a) {
; RV64I-LABEL: mul81:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 81
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul81:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 81
  ret i64 %c
}

define i64 @mul4098(i64 %a) {
; RV64I-LABEL: mul4098:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 1
; RV64I-NEXT:    slli a0, a0, 12
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul4098:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a0, 12
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 4098
  ret i64 %c
}

define i64 @mul4100(i64 %a) {
; RV64I-LABEL: mul4100:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 2
; RV64I-NEXT:    slli a0, a0, 12
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul4100:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a0, 12
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 4100
  ret i64 %c
}

define i64 @mul4104(i64 %a) {
; RV64I-LABEL: mul4104:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 3
; RV64I-NEXT:    slli a0, a0, 12
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul4104:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a0, 12
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 4104
  ret i64 %c
}

define signext i32 @mulw192(i32 signext %a) {
; RV64I-LABEL: mulw192:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 6
; RV64I-NEXT:    slli a0, a0, 8
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mulw192:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    slliw a0, a0, 6
; RV64ZBA-NEXT:    ret
  %c = mul i32 %a, 192
  ret i32 %c
}

define signext i32 @mulw320(i32 signext %a) {
; RV64I-LABEL: mulw320:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 320
; RV64I-NEXT:    mulw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mulw320:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    slliw a0, a0, 6
; RV64ZBA-NEXT:    ret
  %c = mul i32 %a, 320
  ret i32 %c
}

define signext i32 @mulw576(i32 signext %a) {
; RV64I-LABEL: mulw576:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 576
; RV64I-NEXT:    mulw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mulw576:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    slliw a0, a0, 6
; RV64ZBA-NEXT:    ret
  %c = mul i32 %a, 576
  ret i32 %c
}

define i64 @add4104(i64 %a) {
; RV64I-LABEL: add4104:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a1, 1
; RV64I-NEXT:    addiw a1, a1, 8
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: add4104:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    li a1, 1026
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = add i64 %a, 4104
  ret i64 %c
}

define i64 @add4104_2(i64 %a) {
; RV64I-LABEL: add4104_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a1, 1
; RV64I-NEXT:    addiw a1, a1, 8
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: add4104_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    li a1, 1026
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = or disjoint i64 %a, 4104
  ret i64 %c
}

define i64 @add8208(i64 %a) {
; RV64I-LABEL: add8208:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a1, 2
; RV64I-NEXT:    addiw a1, a1, 16
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: add8208:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    li a1, 1026
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = add i64 %a, 8208
  ret i64 %c
}

; Make sure we prefer LUI for the 8192 instead of using sh3add.
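; 8192 = 2 << 12, so it fits a single lui immediate; an sh3add sequence would
; still need an li to materialize 1024 first, so it saves nothing.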
define signext i32 @add8192_i32(i32 signext %a) {
; CHECK-LABEL: add8192_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 2
; CHECK-NEXT:    addw a0, a0, a1
; CHECK-NEXT:    ret
  %c = add i32 %a, 8192
  ret i32 %c
}

; Make sure we prefer LUI for the 8192 instead of using sh3add.
define i64 @add8192(i64 %a) {
; CHECK-LABEL: add8192:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ret
  %c = add i64 %a, 8192
  ret i64 %c
}

define signext i32 @addshl32_5_6(i32 signext %a, i32 signext %b) {
; RV64I-LABEL: addshl32_5_6:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addshl32_5_6:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    slliw a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = shl i32 %a, 5
  %d = shl i32 %b, 6
  %e = add i32 %c, %d
  ret i32 %e
}

define i64 @addshl64_5_6(i64 %a, i64 %b) {
; RV64I-LABEL: addshl64_5_6:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addshl64_5_6:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    slli a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = shl i64 %a, 5
  %d = shl i64 %b, 6
  %e = add i64 %c, %d
  ret i64 %e
}

define signext i32 @addshl32_5_7(i32 signext %a, i32 signext %b) {
; RV64I-LABEL: addshl32_5_7:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    slli a1, a1, 7
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addshl32_5_7:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    slliw a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = shl i32 %a, 5
  %d = shl i32 %b, 7
  %e = add i32 %c, %d
  ret i32 %e
}

define i64 @addshl64_5_7(i64 %a, i64 %b) {
; RV64I-LABEL: addshl64_5_7:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    slli a1, a1, 7
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addshl64_5_7:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    slli a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = shl i64 %a, 5
  %d = shl i64 %b, 7
  %e = add i64 %c, %d
  ret i64 %e
}

define signext i32 @addshl32_5_8(i32 signext %a, i32 signext %b) {
; RV64I-LABEL: addshl32_5_8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    slli a1, a1, 8
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addshl32_5_8:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    slliw a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = shl i32 %a, 5
  %d = shl i32 %b, 8
  %e = add i32 %c, %d
  ret i32 %e
}

define i64 @addshl64_5_8(i64 %a, i64 %b) {
; RV64I-LABEL: addshl64_5_8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    slli a1, a1, 8
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addshl64_5_8:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    slli a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = shl i64 %a, 5
  %d = shl i64 %b, 8
  %e = add i64 %c, %d
  ret i64 %e
}

; Make sure we use sext.b+slli+srli for Zba+Zbb.
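; slli 23 + srli 32 extracts bits [40:9] of the sign-extended value
; zero-extended, i.e. the i32 ashr-by-9 result zero-extended to 64 bits.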
define zeroext i32 @sext_ashr_zext_i8(i8 %a) nounwind {
; RV64I-LABEL: sext_ashr_zext_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a0, a0, 31
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ret
;
; RV64ZBANOZBB-LABEL: sext_ashr_zext_i8:
; RV64ZBANOZBB:       # %bb.0:
; RV64ZBANOZBB-NEXT:    slli a0, a0, 56
; RV64ZBANOZBB-NEXT:    srai a0, a0, 31
; RV64ZBANOZBB-NEXT:    srli a0, a0, 32
; RV64ZBANOZBB-NEXT:    ret
;
; RV64ZBAZBB-LABEL: sext_ashr_zext_i8:
; RV64ZBAZBB:       # %bb.0:
; RV64ZBAZBB-NEXT:    sext.b a0, a0
; RV64ZBAZBB-NEXT:    slli a0, a0, 23
; RV64ZBAZBB-NEXT:    srli a0, a0, 32
; RV64ZBAZBB-NEXT:    ret
  %ext = sext i8 %a to i32
  %1 = ashr i32 %ext, 9
  ret i32 %1
}

define i64 @sh6_sh3_add1(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64I-LABEL: sh6_sh3_add1:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a1, a1, a2
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh6_sh3_add1:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
entry:
  %shl = shl i64 %z, 3
  %shl1 = shl i64 %y, 6
  %add = add nsw i64 %shl1, %shl
  %add2 = add nsw i64 %add, %x
  ret i64 %add2
}

define i64 @sh6_sh3_add2(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64I-LABEL: sh6_sh3_add2:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh6_sh3_add2:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
entry:
  %shl = shl i64 %z, 3
  %shl1 = shl i64 %y, 6
  %add = add nsw i64 %shl1, %x
  %add2 = add nsw i64 %add, %shl
  ret i64 %add2
}

define i64 @sh6_sh3_add3(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64I-LABEL: sh6_sh3_add3:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a1, a1, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh6_sh3_add3:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
entry:
  %shl = shl i64 %z, 3
  %shl1 = shl i64 %y, 6
  %add = add nsw i64 %shl1, %shl
  %add2 = add nsw i64 %x, %add
  ret i64 %add2
}

define i64 @sh6_sh3_add4(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64I-LABEL: sh6_sh3_add4:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh6_sh3_add4:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    slli a1, a1, 6
; RV64ZBA-NEXT:    sh3add a0, a2, a0
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    ret
entry:
  %shl = shl i64 %z, 3
  %shl1 = shl i64 %y, 6
  %add = add nsw i64 %x, %shl
  %add2 = add nsw i64 %add, %shl1
  ret i64 %add2
}

; Make sure we use sext.h+slli+srli for Zba+Zbb.
define zeroext i32 @sext_ashr_zext_i16(i16 %a) nounwind {
; RV64I-LABEL: sext_ashr_zext_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srai a0, a0, 25
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ret
;
; RV64ZBANOZBB-LABEL: sext_ashr_zext_i16:
; RV64ZBANOZBB:       # %bb.0:
; RV64ZBANOZBB-NEXT:    slli a0, a0, 48
; RV64ZBANOZBB-NEXT:    srai a0, a0, 25
; RV64ZBANOZBB-NEXT:    srli a0, a0, 32
; RV64ZBANOZBB-NEXT:    ret
;
; RV64ZBAZBB-LABEL: sext_ashr_zext_i16:
; RV64ZBAZBB:       # %bb.0:
; RV64ZBAZBB-NEXT:    sext.h a0, a0
; RV64ZBAZBB-NEXT:    slli a0, a0, 23
; RV64ZBAZBB-NEXT:    srli a0, a0, 32
; RV64ZBAZBB-NEXT:    ret
  %ext = sext i16 %a to i32
  %1 = ashr i32 %ext, 9
  ret i32 %1
}

; This is the IR you get from InstCombine if you take the difference of 2
; pointers and cast it to unsigned before using it as an index.
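; With Zba this becomes srli + shNadd.uw: the .uw form zero-extends the low 32
; bits of the index and folds in the scaling, so no mask constant is needed.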
1860define signext i16 @sh1adduw_ptrdiff(i64 %diff, ptr %baseptr) {
1861; RV64I-LABEL: sh1adduw_ptrdiff:
1862; RV64I:       # %bb.0:
1863; RV64I-NEXT:    li a2, 1
1864; RV64I-NEXT:    slli a2, a2, 33
1865; RV64I-NEXT:    addi a2, a2, -2
1866; RV64I-NEXT:    and a0, a0, a2
1867; RV64I-NEXT:    add a0, a1, a0
1868; RV64I-NEXT:    lh a0, 0(a0)
1869; RV64I-NEXT:    ret
1870;
1871; RV64ZBA-LABEL: sh1adduw_ptrdiff:
1872; RV64ZBA:       # %bb.0:
1873; RV64ZBA-NEXT:    srli a0, a0, 1
1874; RV64ZBA-NEXT:    sh1add.uw a0, a0, a1
1875; RV64ZBA-NEXT:    lh a0, 0(a0)
1876; RV64ZBA-NEXT:    ret
1877  %ptrdiff = lshr exact i64 %diff, 1
1878  %cast = and i64 %ptrdiff, 4294967295
1879  %ptr = getelementptr inbounds i16, ptr %baseptr, i64 %cast
1880  %res = load i16, ptr %ptr
1881  ret i16 %res
1882}
1883
1884define signext i32 @sh2adduw_ptrdiff(i64 %diff, ptr %baseptr) {
1885; RV64I-LABEL: sh2adduw_ptrdiff:
1886; RV64I:       # %bb.0:
1887; RV64I-NEXT:    li a2, 1
1888; RV64I-NEXT:    slli a2, a2, 34
1889; RV64I-NEXT:    addi a2, a2, -4
1890; RV64I-NEXT:    and a0, a0, a2
1891; RV64I-NEXT:    add a0, a1, a0
1892; RV64I-NEXT:    lw a0, 0(a0)
1893; RV64I-NEXT:    ret
1894;
1895; RV64ZBA-LABEL: sh2adduw_ptrdiff:
1896; RV64ZBA:       # %bb.0:
1897; RV64ZBA-NEXT:    srli a0, a0, 2
1898; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
1899; RV64ZBA-NEXT:    lw a0, 0(a0)
1900; RV64ZBA-NEXT:    ret
1901  %ptrdiff = lshr exact i64 %diff, 2
1902  %cast = and i64 %ptrdiff, 4294967295
1903  %ptr = getelementptr inbounds i32, ptr %baseptr, i64 %cast
1904  %res = load i32, ptr %ptr
1905  ret i32 %res
1906}
1907
1908define i64 @sh3adduw_ptrdiff(i64 %diff, ptr %baseptr) {
1909; RV64I-LABEL: sh3adduw_ptrdiff:
1910; RV64I:       # %bb.0:
1911; RV64I-NEXT:    li a2, 1
1912; RV64I-NEXT:    slli a2, a2, 35
1913; RV64I-NEXT:    addi a2, a2, -8
1914; RV64I-NEXT:    and a0, a0, a2
1915; RV64I-NEXT:    add a0, a1, a0
1916; RV64I-NEXT:    ld a0, 0(a0)
1917; RV64I-NEXT:    ret
1918;
1919; RV64ZBA-LABEL: sh3adduw_ptrdiff:
1920; RV64ZBA:       # %bb.0:
1921; RV64ZBA-NEXT:    srli a0, a0, 3
1922; RV64ZBA-NEXT:    sh3add.uw a0, a0, a1
1923; RV64ZBA-NEXT:    ld a0, 0(a0)
1924; RV64ZBA-NEXT:    ret
1925  %ptrdiff = lshr exact i64 %diff, 3
1926  %cast = and i64 %ptrdiff, 4294967295
1927  %ptr = getelementptr inbounds i64, ptr %baseptr, i64 %cast
1928  %res = load i64, ptr %ptr
1929  ret i64 %res
1930}
1931
define signext i16 @srliw_1_sh1add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_1_sh1add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 1
; RV64I-NEXT:    slli a1, a1, 1
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_1_sh1add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 1
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 1
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i16, ptr %0, i64 %4
  %6 = load i16, ptr %5, align 2
  ret i16 %6
}

define i128 @slliuw_ptrdiff(i64 %diff, ptr %baseptr) {
; RV64I-LABEL: slliuw_ptrdiff:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 1
; RV64I-NEXT:    slli a2, a2, 36
; RV64I-NEXT:    addi a2, a2, -16
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    add a1, a1, a0
; RV64I-NEXT:    ld a0, 0(a1)
; RV64I-NEXT:    ld a1, 8(a1)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: slliuw_ptrdiff:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a0, a0, 4
; RV64ZBA-NEXT:    slli.uw a0, a0, 4
; RV64ZBA-NEXT:    add a1, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a1)
; RV64ZBA-NEXT:    ld a1, 8(a1)
; RV64ZBA-NEXT:    ret
  %ptrdiff = lshr exact i64 %diff, 4
  %cast = and i64 %ptrdiff, 4294967295
  %ptr = getelementptr inbounds i128, ptr %baseptr, i64 %cast
  %res = load i128, ptr %ptr
  ret i128 %res
}

define signext i32 @srliw_2_sh2add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_2_sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 2
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_2_sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 2
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 2
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i32, ptr %0, i64 %4
  %6 = load i32, ptr %5, align 4
  ret i32 %6
}

define i64 @srliw_3_sh3add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_3_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 3
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_3_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 3
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 3
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i64, ptr %0, i64 %4
  %6 = load i64, ptr %5, align 8
  ret i64 %6
}

define signext i32 @srliw_1_sh2add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_1_sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 1
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_1_sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 1
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 1
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i32, ptr %0, i64 %4
  %6 = load i32, ptr %5, align 4
  ret i32 %6
}

define i64 @srliw_1_sh3add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_1_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 1
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_1_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 1
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 1
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i64, ptr %0, i64 %4
  %6 = load i64, ptr %5, align 8
  ret i64 %6
}

define i64 @srliw_2_sh3add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_2_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 2
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_2_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 2
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i64, ptr %0, i64 %4
  %6 = load i64, ptr %5, align 8
  ret i64 %6
}

define signext i16 @srliw_2_sh1add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_2_sh1add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 2
; RV64I-NEXT:    slli a1, a1, 1
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_2_sh1add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 2
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 2
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i16, ptr %0, i64 %4
  %6 = load i16, ptr %5, align 2
  ret i16 %6
}

define signext i32 @srliw_3_sh2add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_3_sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 3
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_3_sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 3
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 3
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i32, ptr %0, i64 %4
  %6 = load i32, ptr %5, align 4
  ret i32 %6
}

define i64 @srliw_4_sh3add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_4_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 4
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_4_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 4
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 4
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i64, ptr %0, i64 %4
  %6 = load i64, ptr %5, align 8
  ret i64 %6
}

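; The srli_*_sh*add tests below use a 64-bit index. Where the RV64I
; baseline shows slli+andi instead of srli+slli, it is using identities
; of the form
;   (x >>u 1) << 2 == (x << 1) & ~3
; to merge the two shifts, while Zba keeps the srli and folds the
; scaling into shNadd.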
define signext i32 @srli_1_sh2add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_1_sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 1
; RV64I-NEXT:    andi a1, a1, -4
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_1_sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a1, a1, 1
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i64 %1, 1
  %4 = getelementptr inbounds i32, ptr %0, i64 %3
  %5 = load i32, ptr %4, align 4
  ret i32 %5
}

define i64 @srli_2_sh3add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_2_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 1
; RV64I-NEXT:    andi a1, a1, -8
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_2_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a1, a1, 2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i64 %1, 2
  %4 = getelementptr inbounds i64, ptr %0, i64 %3
  %5 = load i64, ptr %4, align 8
  ret i64 %5
}

define signext i16 @srli_2_sh1add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_2_sh1add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srli a1, a1, 1
; RV64I-NEXT:    andi a1, a1, -2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_2_sh1add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a1, a1, 2
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i64 %1, 2
  %4 = getelementptr inbounds i16, ptr %0, i64 %3
  %5 = load i16, ptr %4, align 2
  ret i16 %5
}

define signext i32 @srli_3_sh2add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_3_sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srli a1, a1, 1
; RV64I-NEXT:    andi a1, a1, -4
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_3_sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a1, a1, 3
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i64 %1, 3
  %4 = getelementptr inbounds i32, ptr %0, i64 %3
  %5 = load i32, ptr %4, align 4
  ret i32 %5
}

define i64 @srli_4_sh3add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_4_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srli a1, a1, 1
; RV64I-NEXT:    andi a1, a1, -8
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_4_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a1, a1, 4
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i64 %1, 4
  %4 = getelementptr inbounds i64, ptr %0, i64 %3
  %5 = load i64, ptr %4, align 8
  ret i64 %5
}

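; The shl_*_sh*adduw tests below fold zext(x << n) into the .uw address
; forms. Sketch of the semantics relied on here:
;   shNadd.uw rd, rs1, rs2  ==  rs2 + (zext32(rs1) << N)
; so the zero-extension of the low 32 bits is performed by the .uw
; instruction itself and needs no separate mask.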
define signext i16 @shl_2_sh1adduw(ptr %0, i32 signext %1) {
; RV64I-LABEL: shl_2_sh1adduw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 34
; RV64I-NEXT:    srli a1, a1, 31
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: shl_2_sh1adduw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 2
; RV64ZBA-NEXT:    sh1add.uw a0, a1, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = shl i32 %1, 2
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i16, ptr %0, i64 %4
  %6 = load i16, ptr %5, align 2
  ret i16 %6
}

define signext i32 @shl_16_sh2adduw(ptr %0, i32 signext %1) {
; RV64I-LABEL: shl_16_sh2adduw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 48
; RV64I-NEXT:    srli a1, a1, 30
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: shl_16_sh2adduw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 16
; RV64ZBA-NEXT:    sh2add.uw a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = shl i32 %1, 16
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i32, ptr %0, i64 %4
  %6 = load i32, ptr %5, align 4
  ret i32 %6
}

define i64 @shl_31_sh3adduw(ptr %0, i32 signext %1) {
; RV64I-LABEL: shl_31_sh3adduw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 63
; RV64I-NEXT:    srli a1, a1, 29
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: shl_31_sh3adduw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 31
; RV64ZBA-NEXT:    sh3add.uw a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = shl i32 %1, 31
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i64, ptr %0, i64 %4
  %6 = load i64, ptr %5, align 8
  ret i64 %6
}

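; The pack_i64 tests below rely on or being an add when the operand bit
; ranges are disjoint: (b << 32) is zero in bits 31:0 and zext32(a) is
; zero in bits 63:32, so
;   (b << 32) | zext32(a) == zext32(a) + (b << 32)
; which is exactly add.uw after an slli by 32.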
define i64 @pack_i64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: pack_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: pack_i64:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 32
; RV64ZBA-NEXT:    add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %shl = and i64 %a, 4294967295
  %shl1 = shl i64 %b, 32
  %or = or i64 %shl1, %shl
  ret i64 %or
}

define i64 @pack_i64_2(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: pack_i64_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: pack_i64_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 32
; RV64ZBA-NEXT:    add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %zexta = zext i32 %a to i64
  %zextb = zext i32 %b to i64
  %shl1 = shl i64 %zextb, 32
  %or = or i64 %shl1, %zexta
  ret i64 %or
}

define i64 @pack_i64_disjoint(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: pack_i64_disjoint:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: pack_i64_disjoint:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %shl = and i64 %a, 4294967295
  %or = or disjoint i64 %b, %shl
  ret i64 %or
}

define i64 @pack_i64_disjoint_2(i32 signext %a, i64 %b) nounwind {
; RV64I-LABEL: pack_i64_disjoint_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: pack_i64_disjoint_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %zexta = zext i32 %a to i64
  %or = or disjoint i64 %b, %zexta
  ret i64 %or
}

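; The array_index tests below cover two-dimensional GEPs. For a
; [2^a x elt] array with a 2^b-byte element the address is
;   p + idx1*2^(a+b) + idx2*2^b
; lowered either as two chained shNadds, or, when a+b exceeds 3, in the
; reassociated form p + (idx1*2^a + idx2)*2^b.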
define i8 @array_index_sh1_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh0:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 1
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lbu a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh1_sh0:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    add a0, a0, a2
; RV64ZBA-NEXT:    lbu a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [2 x i8], ptr %p, i64 %idx1, i64 %idx2
  %b = load i8, ptr %a, align 1
  ret i8 %b
}

define i16 @array_index_sh1_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh1:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 1
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh1_sh1:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    sh1add a0, a2, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [2 x i16], ptr %p, i64 %idx1, i64 %idx2
  %b = load i16, ptr %a, align 2
  ret i16 %b
}

define i32 @array_index_sh1_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 2
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh1_sh2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    sh2add a0, a2, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [2 x i32], ptr %p, i64 %idx1, i64 %idx2
  %b = load i32, ptr %a, align 4
  ret i32 %b
}

define i64 @array_index_sh1_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 4
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh1_sh3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [2 x i64], ptr %p, i64 %idx1, i64 %idx2
  %b = load i64, ptr %a, align 8
  ret i64 %b
}

define i8 @array_index_sh2_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh0:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lbu a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh2_sh0:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    add a0, a0, a2
; RV64ZBA-NEXT:    lbu a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [4 x i8], ptr %p, i64 %idx1, i64 %idx2
  %b = load i8, ptr %a, align 1
  ret i8 %b
}

define i16 @array_index_sh2_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh1:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 1
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh2_sh1:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    sh1add a0, a2, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [4 x i16], ptr %p, i64 %idx1, i64 %idx2
  %b = load i16, ptr %a, align 2
  ret i16 %b
}

define i32 @array_index_sh2_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 4
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 2
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh2_sh2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a1, a2
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [4 x i32], ptr %p, i64 %idx1, i64 %idx2
  %b = load i32, ptr %a, align 4
  ret i32 %b
}

define i64 @array_index_sh2_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 5
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh2_sh3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [4 x i64], ptr %p, i64 %idx1, i64 %idx2
  %b = load i64, ptr %a, align 8
  ret i64 %b
}

define i8 @array_index_sh3_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh0:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lbu a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh3_sh0:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    add a0, a0, a2
; RV64ZBA-NEXT:    lbu a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [8 x i8], ptr %p, i64 %idx1, i64 %idx2
  %b = load i8, ptr %a, align 1
  ret i8 %b
}

define i16 @array_index_sh3_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh1:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 4
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 1
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh3_sh1:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [8 x i16], ptr %p, i64 %idx1, i64 %idx2
  %b = load i16, ptr %a, align 2
  ret i16 %b
}

define i32 @array_index_sh3_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 5
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 2
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh3_sh2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [8 x i32], ptr %p, i64 %idx1, i64 %idx2
  %b = load i32, ptr %a, align 4
  ret i32 %b
}

define i64 @array_index_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh3_sh3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [8 x i64], ptr %p, i64 %idx1, i64 %idx2
  %b = load i64, ptr %a, align 8
  ret i64 %b
}

; Similar to the cases above, but with an lshr on one of the indices. This
; requires special handling during isel to form a shift pair.
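; Roughly, the address computed below is
;   p + (idx1 >>u 58)*64 + idx2*8 == p + ((idx1 >>u 58)*8 + idx2)*8
; so after the srli the scale of 64 can be split across two sh3adds.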
define i64 @array_index_lshr_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_lshr_sh3_sh3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srli a1, a1, 58
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_lshr_sh3_sh3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a1, a1, 58
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %shr = lshr i64 %idx1, 58
  %a = getelementptr inbounds [8 x i64], ptr %p, i64 %shr, i64 %idx2
  %b = load i64, ptr %a, align 8
  ret i64 %b
}

define i8 @array_index_sh4_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; CHECK-LABEL: array_index_sh4_sh0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    lbu a0, 0(a0)
; CHECK-NEXT:    ret
  %a = getelementptr inbounds [16 x i8], ptr %p, i64 %idx1, i64 %idx2
  %b = load i8, ptr %a, align 1
  ret i8 %b
}

define i16 @array_index_sh4_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh4_sh1:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 5
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 1
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh4_sh1:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 5
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    sh1add a0, a2, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [16 x i16], ptr %p, i64 %idx1, i64 %idx2
  %b = load i16, ptr %a, align 2
  ret i16 %b
}

define i32 @array_index_sh4_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh4_sh2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 2
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh4_sh2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 6
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    sh2add a0, a2, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [16 x i32], ptr %p, i64 %idx1, i64 %idx2
  %b = load i32, ptr %a, align 4
  ret i32 %b
}

define i64 @array_index_sh4_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh4_sh3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 7
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh4_sh3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 7
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    sh3add a0, a2, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [16 x i64], ptr %p, i64 %idx1, i64 %idx2
  %b = load i64, ptr %a, align 8
  ret i64 %b
}

define ptr @test_gep_gep_dont_crash(ptr %p, i64 %a1, i64 %a2) {
; RV64I-LABEL: test_gep_gep_dont_crash:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a2, a2, 6
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: test_gep_gep_dont_crash:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a2, a2, 6
; RV64ZBA-NEXT:    add a1, a2, a1
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %lshr = lshr i64 %a2, 6
  %and = and i64 %lshr, 67108863
  %gep1 = getelementptr i64, ptr %p, i64 %and
  %gep2 = getelementptr i64, ptr %gep1, i64 %a1
  ret ptr %gep2
}

define i64 @regression(i32 signext %x, i32 signext %y) {
; RV64I-LABEL: regression:
; RV64I:       # %bb.0:
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a1, a0, 29
; RV64I-NEXT:    srli a0, a0, 27
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: regression:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    subw a0, a0, a1
; RV64ZBA-NEXT:    slli.uw a0, a0, 3
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %sub = sub i32 %x, %y
  %ext = zext i32 %sub to i64
  %res = mul nuw nsw i64 %ext, 24
  ret i64 %res
}

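; The mul_neg* tests below cover multiplies by small negative constants.
; A sketch of the decompositions expected in the checks:
;   x * -3 == -(3x)    ->  sh1add + neg
;   x * -7 == x - 8x   ->  slli + sub
; while -6 has no cheap shift/shadd form and stays a real mul.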
define i64 @mul_neg1(i64 %a) {
; CHECK-LABEL: mul_neg1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    neg a0, a0
; CHECK-NEXT:    ret
  %c = mul i64 %a, -1
  ret i64 %c
}

define i64 @mul_neg2(i64 %a) {
; CHECK-LABEL: mul_neg2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 1
; CHECK-NEXT:    neg a0, a0
; CHECK-NEXT:    ret
  %c = mul i64 %a, -2
  ret i64 %c
}

define i64 @mul_neg3(i64 %a) {
; RV64I-LABEL: mul_neg3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 1
; RV64I-NEXT:    neg a0, a0
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul_neg3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    neg a0, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, -3
  ret i64 %c
}

define i64 @mul_neg4(i64 %a) {
; CHECK-LABEL: mul_neg4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    neg a0, a0
; CHECK-NEXT:    ret
  %c = mul i64 %a, -4
  ret i64 %c
}

define i64 @mul_neg5(i64 %a) {
; RV64I-LABEL: mul_neg5:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 2
; RV64I-NEXT:    neg a0, a0
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul_neg5:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    neg a0, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, -5
  ret i64 %c
}

define i64 @mul_neg6(i64 %a) {
; CHECK-LABEL: mul_neg6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, -6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    ret
  %c = mul i64 %a, -6
  ret i64 %c
}

define i64 @mul_neg7(i64 %a) {
; CHECK-LABEL: mul_neg7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a1, a0, 3
; CHECK-NEXT:    sub a0, a0, a1
; CHECK-NEXT:    ret
  %c = mul i64 %a, -7
  ret i64 %c
}

define i64 @mul_neg8(i64 %a) {
; CHECK-LABEL: mul_neg8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    neg a0, a0
; CHECK-NEXT:    ret
  %c = mul i64 %a, -8
  ret i64 %c
}

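; The bext_mul* tests below multiply a single extracted bit by a
; constant. Decompositions expected by the checks:
;   x * 12  == (3x) << 2        ->  sh1add + slli
;   x * 45  == 9 * (5x)         ->  sh2add + sh3add
;   x * 132 == (x << 7) + 4x    ->  slli + sh2add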
define i64 @bext_mul12(i32 %1, i32 %2) {
; RV64I-LABEL: bext_mul12:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srlw a0, a0, a1
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    slli a1, a0, 2
; RV64I-NEXT:    slli a0, a0, 4
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBANOZBB-LABEL: bext_mul12:
; RV64ZBANOZBB:       # %bb.0: # %entry
; RV64ZBANOZBB-NEXT:    srlw a0, a0, a1
; RV64ZBANOZBB-NEXT:    andi a0, a0, 1
; RV64ZBANOZBB-NEXT:    sh1add a0, a0, a0
; RV64ZBANOZBB-NEXT:    slli a0, a0, 2
; RV64ZBANOZBB-NEXT:    ret
;
; RV64ZBAZBBNOZBS-LABEL: bext_mul12:
; RV64ZBAZBBNOZBS:       # %bb.0: # %entry
; RV64ZBAZBBNOZBS-NEXT:    srlw a0, a0, a1
; RV64ZBAZBBNOZBS-NEXT:    andi a0, a0, 1
; RV64ZBAZBBNOZBS-NEXT:    sh1add a0, a0, a0
; RV64ZBAZBBNOZBS-NEXT:    slli a0, a0, 2
; RV64ZBAZBBNOZBS-NEXT:    ret
;
; RV64ZBAZBBZBS-LABEL: bext_mul12:
; RV64ZBAZBBZBS:       # %bb.0: # %entry
; RV64ZBAZBBZBS-NEXT:    bext a0, a0, a1
; RV64ZBAZBBZBS-NEXT:    sh1add a0, a0, a0
; RV64ZBAZBBZBS-NEXT:    slli a0, a0, 2
; RV64ZBAZBBZBS-NEXT:    ret
entry:
  %3 = lshr i32 %1, %2
  %4 = and i32 %3, 1
  %5 = zext nneg i32 %4 to i64
  %6 = mul i64 %5, 12
  ret i64 %6
}

define i64 @bext_mul45(i32 %1, i32 %2) {
; RV64I-LABEL: bext_mul45:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srlw a0, a0, a1
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    li a1, 45
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBANOZBB-LABEL: bext_mul45:
; RV64ZBANOZBB:       # %bb.0: # %entry
; RV64ZBANOZBB-NEXT:    srlw a0, a0, a1
; RV64ZBANOZBB-NEXT:    andi a0, a0, 1
; RV64ZBANOZBB-NEXT:    sh2add a0, a0, a0
; RV64ZBANOZBB-NEXT:    sh3add a0, a0, a0
; RV64ZBANOZBB-NEXT:    ret
;
; RV64ZBAZBBNOZBS-LABEL: bext_mul45:
; RV64ZBAZBBNOZBS:       # %bb.0: # %entry
; RV64ZBAZBBNOZBS-NEXT:    srlw a0, a0, a1
; RV64ZBAZBBNOZBS-NEXT:    andi a0, a0, 1
; RV64ZBAZBBNOZBS-NEXT:    sh2add a0, a0, a0
; RV64ZBAZBBNOZBS-NEXT:    sh3add a0, a0, a0
; RV64ZBAZBBNOZBS-NEXT:    ret
;
; RV64ZBAZBBZBS-LABEL: bext_mul45:
; RV64ZBAZBBZBS:       # %bb.0: # %entry
; RV64ZBAZBBZBS-NEXT:    bext a0, a0, a1
; RV64ZBAZBBZBS-NEXT:    sh2add a0, a0, a0
; RV64ZBAZBBZBS-NEXT:    sh3add a0, a0, a0
; RV64ZBAZBBZBS-NEXT:    ret
entry:
  %3 = lshr i32 %1, %2
  %4 = and i32 %3, 1
  %5 = zext nneg i32 %4 to i64
  %6 = mul i64 %5, 45
  ret i64 %6
}

define i64 @bext_mul132(i32 %1, i32 %2) {
; RV64I-LABEL: bext_mul132:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srlw a0, a0, a1
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    li a1, 132
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBANOZBB-LABEL: bext_mul132:
; RV64ZBANOZBB:       # %bb.0: # %entry
; RV64ZBANOZBB-NEXT:    srlw a0, a0, a1
; RV64ZBANOZBB-NEXT:    andi a0, a0, 1
; RV64ZBANOZBB-NEXT:    slli a1, a0, 7
; RV64ZBANOZBB-NEXT:    sh2add a0, a0, a1
; RV64ZBANOZBB-NEXT:    ret
;
; RV64ZBAZBBNOZBS-LABEL: bext_mul132:
; RV64ZBAZBBNOZBS:       # %bb.0: # %entry
; RV64ZBAZBBNOZBS-NEXT:    srlw a0, a0, a1
; RV64ZBAZBBNOZBS-NEXT:    andi a0, a0, 1
; RV64ZBAZBBNOZBS-NEXT:    slli a1, a0, 7
; RV64ZBAZBBNOZBS-NEXT:    sh2add a0, a0, a1
; RV64ZBAZBBNOZBS-NEXT:    ret
;
; RV64ZBAZBBZBS-LABEL: bext_mul132:
; RV64ZBAZBBZBS:       # %bb.0: # %entry
; RV64ZBAZBBZBS-NEXT:    bext a0, a0, a1
; RV64ZBAZBBZBS-NEXT:    slli a1, a0, 7
; RV64ZBAZBBZBS-NEXT:    sh2add a0, a0, a1
; RV64ZBAZBBZBS-NEXT:    ret
entry:
  %3 = lshr i32 %1, %2
  %4 = and i32 %3, 1
  %5 = zext nneg i32 %4 to i64
  %6 = mul i64 %5, 132
  ret i64 %6
}

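; gep_lshr_i32 below scales an index by 80 bytes. The decomposition the
; Zba checks expect is 80 == 16*5:
;   idx*80 == 4*(idx << 4) + (idx << 4)  ->  slli.uw 4 + sh2add
; with slli.uw also providing the required 32-bit mask of the index.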
define ptr @gep_lshr_i32(ptr %0, i64 %1) {
; RV64I-LABEL: gep_lshr_i32:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srli a1, a1, 2
; RV64I-NEXT:    li a2, 5
; RV64I-NEXT:    slli a2, a2, 36
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    mulhu a1, a1, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: gep_lshr_i32:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    srli a1, a1, 2
; RV64ZBA-NEXT:    slli.uw a1, a1, 4
; RV64ZBA-NEXT:    sh2add a1, a1, a1
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    ret
entry:
  %2 = lshr exact i64 %1, 2
  %3 = and i64 %2, 4294967295
  %5 = getelementptr [80 x i8], ptr %0, i64 %3
  ret ptr %5
}

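; The srli_slliuw tests below rely on the slli.uw semantics
;   slli.uw rd, rs1, shamt == (rs1 & 0xffffffff) << shamt
; so an lshr, a 32-bit mask, and a shl collapse to srli + slli.uw.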
define i64 @srli_slliuw(i64 %1) {
; RV64I-LABEL: srli_slliuw:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a0, a0, 2
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    slli a1, a1, 36
; RV64I-NEXT:    addi a1, a1, -16
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_slliuw:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    srli a0, a0, 2
; RV64ZBA-NEXT:    slli.uw a0, a0, 4
; RV64ZBA-NEXT:    ret
entry:
  %2 = lshr exact i64 %1, 2
  %3 = and i64 %2, 4294967295
  %4 = shl i64 %3, 4
  ret i64 %4
}

define i64 @srli_slliuw_canonical(i64 %0) {
; RV64I-LABEL: srli_slliuw_canonical:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a0, a0, 2
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    slli a1, a1, 36
; RV64I-NEXT:    addi a1, a1, -16
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_slliuw_canonical:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    srli a0, a0, 2
; RV64ZBA-NEXT:    slli.uw a0, a0, 4
; RV64ZBA-NEXT:    ret
entry:
  %1 = shl i64 %0, 2
  %2 = and i64 %1, 68719476720
  ret i64 %2
}

; Make sure we don't accidentally use slli.uw with a shift of 32.
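; (slli.uw would be no shorter than the plain srli+slli pair here, so
; the check guards against the .uw fold firing where it buys nothing.)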
define i64 @srli_slliuw_negative_test(i64 %0) {
; CHECK-LABEL: srli_slliuw_negative_test:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    srli a0, a0, 6
; CHECK-NEXT:    slli a0, a0, 32
; CHECK-NEXT:    ret
entry:
  %1 = lshr i64 %0, 6
  %2 = shl i64 %1, 32
  ret i64 %2
}

define i64 @srli_slli_i16(i64 %1) {
; CHECK-LABEL: srli_slli_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    lui a1, 256
; CHECK-NEXT:    addiw a1, a1, -16
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    ret
entry:
  %2 = lshr exact i64 %1, 2
  %3 = and i64 %2, 65535
  %4 = shl i64 %3, 4
  ret i64 %4
}

define i64 @srli_slliuw_2(i64 %1) {
; RV64I-LABEL: srli_slliuw_2:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srli a0, a0, 15
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    slli a1, a1, 35
; RV64I-NEXT:    addi a1, a1, -8
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_slliuw_2:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    srli a0, a0, 18
; RV64ZBA-NEXT:    slli.uw a0, a0, 3
; RV64ZBA-NEXT:    ret
entry:
  %2 = lshr i64 %1, 18
  %3 = and i64 %2, 4294967295
  %4 = shl i64 %3, 3
  ret i64 %4
}

define i64 @srli_slliuw_canonical_2(i64 %0) {
; RV64I-LABEL: srli_slliuw_canonical_2:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srli a0, a0, 15
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    slli a1, a1, 35
; RV64I-NEXT:    addi a1, a1, -8
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_slliuw_canonical_2:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    srli a0, a0, 18
; RV64ZBA-NEXT:    slli.uw a0, a0, 3
; RV64ZBA-NEXT:    ret
entry:
  %1 = lshr i64 %0, 15
  %2 = and i64 %1, 34359738360
  ret i64 %2
}

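; srai_srli_sh3add below combines an arithmetic and a logical right
; shift with a scaled GEP:
;   addr = p + (((x >>s 32) >>u 6) << 3)
; Only the final scale-by-8 folds into sh3add; the two right shifts
; stay distinct because they shift in different fill bits.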
define ptr @srai_srli_sh3add(ptr %0, i64 %1) nounwind {
; RV64I-LABEL: srai_srli_sh3add:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srai a1, a1, 32
; RV64I-NEXT:    srli a1, a1, 6
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srai_srli_sh3add:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    srai a1, a1, 32
; RV64ZBA-NEXT:    srli a1, a1, 6
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
entry:
  %2 = ashr i64 %1, 32
  %3 = lshr i64 %2, 6
  %4 = getelementptr i64, ptr %0, i64 %3
  ret ptr %4
}

define ptr @srai_srli_slli(ptr %0, i64 %1) nounwind {
; CHECK-LABEL: srai_srli_slli:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    srai a1, a1, 32
; CHECK-NEXT:    srli a1, a1, 6
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ret
entry:
  %2 = ashr i64 %1, 32
  %3 = lshr i64 %2, 6
  %4 = getelementptr i128, ptr %0, i64 %3
  ret ptr %4
}

; Negative test to make sure the peephole added for srai_srli_slli and
; srai_srli_sh3add doesn't break this.
define i64 @srai_andi(i64 %x) nounwind {
; CHECK-LABEL: srai_andi:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    srai a0, a0, 8
; CHECK-NEXT:    andi a0, a0, -8
; CHECK-NEXT:    ret
entry:
  %y = ashr i64 %x, 8
  %z = and i64 %y, -8
  ret i64 %z
}

; Negative test to make sure the peephole added for srai_srli_slli and
; srai_srli_sh3add doesn't break this.
define i64 @srai_lui_and(i64 %x) nounwind {
; CHECK-LABEL: srai_lui_and:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    srai a0, a0, 8
; CHECK-NEXT:    lui a1, 1048574
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    ret
entry:
  %y = ashr i64 %x, 8
  %z = and i64 %y, -8192
  ret i64 %z
}

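; add_u32simm32_zextw below checks that adding 0xfffffffe under a
; 32-bit mask becomes addi -2 + zext.w, using
;   (x + 4294967294) & 0xffffffff == (x - 2) & 0xffffffff
; since 4294967294 is congruent to -2 modulo 2^32.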
define i64 @add_u32simm32_zextw(i64 %x) nounwind {
; RV64I-LABEL: add_u32simm32_zextw:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    addi a1, a1, -2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    addi a1, a1, 1
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: add_u32simm32_zextw:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    addi a0, a0, -2
; RV64ZBA-NEXT:    zext.w a0, a0
; RV64ZBA-NEXT:    ret
entry:
  %add = add i64 %x, 4294967294
  %and = and i64 %add, 4294967295
  ret i64 %and
}
