; xref: /llvm-project/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll (revision b6ea46fe72c2ee192b334be6fffaae35a10f5900)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I

; This test checks that unnecessary masking of shift amount operands is
; eliminated during instruction selection. The test needs to ensure that the
; masking is not removed if it may affect the shift amount.

define i32 @sll_redundant_mask(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sll_redundant_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sll_redundant_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    ret
; The and with 31 keeps all 5 shift-amount bits of an i32, so the checks
; above expect isel to drop the mask and emit a bare sll/sllw.
  %1 = and i32 %b, 31
  %2 = shl i32 %a, %1
  ret i32 %2
}

define i32 @sll_non_redundant_mask(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sll_non_redundant_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    andi a1, a1, 15
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sll_non_redundant_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    andi a1, a1, 15
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    ret
; Mask of 15 clears bit 4 of the 5-bit shift amount, so it can change the
; result; the checks above expect the andi to be preserved.
  %1 = and i32 %b, 15
  %2 = shl i32 %a, %1
  ret i32 %2
}

define i32 @srl_redundant_mask(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: srl_redundant_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srl a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: srl_redundant_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srlw a0, a0, a1
; RV64I-NEXT:    ret
; 4095 (0xfff) has all 5 low shift-amount bits set, so the mask is
; redundant even though it is wider than 5 bits; expect a bare srl/srlw.
  %1 = and i32 %b, 4095
  %2 = lshr i32 %a, %1
  ret i32 %2
}

define i32 @srl_non_redundant_mask(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: srl_non_redundant_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    andi a1, a1, 7
; RV32I-NEXT:    srl a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: srl_non_redundant_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    andi a1, a1, 7
; RV64I-NEXT:    srlw a0, a0, a1
; RV64I-NEXT:    ret
; Mask of 7 clears bits 3 and 4 of the shift amount, so it must be kept.
  %1 = and i32 %b, 7
  %2 = lshr i32 %a, %1
  ret i32 %2
}

define i32 @sra_redundant_mask(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sra_redundant_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sra a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sra_redundant_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraw a0, a0, a1
; RV64I-NEXT:    ret
; 65535 (0xffff) covers all 5 shift-amount bits, so the mask is redundant.
  %1 = and i32 %b, 65535
  %2 = ashr i32 %a, %1
  ret i32 %2
}

define i32 @sra_non_redundant_mask(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sra_non_redundant_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    andi a1, a1, 32
; RV32I-NEXT:    sra a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sra_non_redundant_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraw a0, a0, zero
; RV64I-NEXT:    ret
; Mask of 32 has no overlap with the 5 shift-amount bits of an i32, so it
; cannot be treated as redundant. Note the RV64 output folds the shift
; amount to zero rather than keeping the andi.
  %1 = and i32 %b, 32
  %2 = ashr i32 %a, %1
  ret i32 %2
}

define i32 @sll_redundant_mask_zeros(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sll_redundant_mask_zeros:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a1, 1
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sll_redundant_mask_zeros:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 1
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    ret
; The shl by 1 already forces bit 0 to zero, so masking with 30 (0b11110)
; is equivalent to masking with 31 and can be dropped by isel.
  %1 = shl i32 %b, 1
  %2 = and i32 %1, 30
  %3 = shl i32 %a, %2
  ret i32 %3
}

define i32 @srl_redundant_mask_zeros(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: srl_redundant_mask_zeros:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a1, 2
; RV32I-NEXT:    srl a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: srl_redundant_mask_zeros:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    srlw a0, a0, a1
; RV64I-NEXT:    ret
; The shl by 2 forces bits 1:0 to zero, so masking with 28 (0b11100) is
; equivalent to masking with 31 and can be dropped by isel.
  %1 = shl i32 %b, 2
  %2 = and i32 %1, 28
  %3 = lshr i32 %a, %2
  ret i32 %3
}

define i32 @sra_redundant_mask_zeros(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sra_redundant_mask_zeros:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a1, 3
; RV32I-NEXT:    sra a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sra_redundant_mask_zeros:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    sraw a0, a0, a1
; RV64I-NEXT:    ret
; The shl by 3 forces bits 2:0 to zero, so masking with 24 (0b11000) is
; equivalent to masking with 31 and can be dropped by isel.
  %1 = shl i32 %b, 3
  %2 = and i32 %1, 24
  %3 = ashr i32 %a, %2
  ret i32 %3
}

define i64 @sll_redundant_mask_zeros_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: sll_redundant_mask_zeros_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a2, a2, 2
; RV32I-NEXT:    andi a4, a2, 60
; RV32I-NEXT:    addi a3, a4, -32
; RV32I-NEXT:    bltz a3, .LBB9_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    sll a1, a0, a4
; RV32I-NEXT:    j .LBB9_3
; RV32I-NEXT:  .LBB9_2:
; RV32I-NEXT:    sll a1, a1, a2
; RV32I-NEXT:    srli a5, a0, 1
; RV32I-NEXT:    not a4, a4
; RV32I-NEXT:    srl a4, a5, a4
; RV32I-NEXT:    or a1, a1, a4
; RV32I-NEXT:  .LBB9_3:
; RV32I-NEXT:    sll a0, a0, a2
; RV32I-NEXT:    srai a3, a3, 31
; RV32I-NEXT:    and a0, a3, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sll_redundant_mask_zeros_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    sll a0, a0, a1
; RV64I-NEXT:    ret
; i64 variant: the shl by 2 forces bits 1:0 to zero, so masking with 60
; (0b111100) covers all 6 shift-amount bits of an i64 and is dropped on
; RV64; RV32 still needs the mask inside its expanded 64-bit shift sequence.
  %1 = shl i64 %b, 2
  %2 = and i64 %1, 60
  %3 = shl i64 %a, %2
  ret i64 %3
}

define i64 @srl_redundant_mask_zeros_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: srl_redundant_mask_zeros_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a2, a2, 3
; RV32I-NEXT:    andi a4, a2, 56
; RV32I-NEXT:    addi a3, a4, -32
; RV32I-NEXT:    bltz a3, .LBB10_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    srl a0, a1, a4
; RV32I-NEXT:    j .LBB10_3
; RV32I-NEXT:  .LBB10_2:
; RV32I-NEXT:    srl a0, a0, a2
; RV32I-NEXT:    slli a5, a1, 1
; RV32I-NEXT:    not a4, a4
; RV32I-NEXT:    sll a4, a5, a4
; RV32I-NEXT:    or a0, a0, a4
; RV32I-NEXT:  .LBB10_3:
; RV32I-NEXT:    srl a1, a1, a2
; RV32I-NEXT:    srai a3, a3, 31
; RV32I-NEXT:    and a1, a3, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: srl_redundant_mask_zeros_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    srl a0, a0, a1
; RV64I-NEXT:    ret
; i64 variant: the shl by 3 forces bits 2:0 to zero, so masking with 56
; (0b111000) covers all 6 shift-amount bits of an i64 and is dropped on
; RV64; RV32 still needs the mask inside its expanded 64-bit shift sequence.
  %1 = shl i64 %b, 3
  %2 = and i64 %1, 56
  %3 = lshr i64 %a, %2
  ret i64 %3
}

define i64 @sra_redundant_mask_zeros_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: sra_redundant_mask_zeros_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a2, a2, 4
; RV32I-NEXT:    andi a3, a2, 48
; RV32I-NEXT:    addi a4, a3, -32
; RV32I-NEXT:    bltz a4, .LBB11_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    sra a0, a1, a3
; RV32I-NEXT:    srai a1, a1, 31
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB11_2:
; RV32I-NEXT:    srl a0, a0, a2
; RV32I-NEXT:    slli a4, a1, 1
; RV32I-NEXT:    not a3, a3
; RV32I-NEXT:    sll a3, a4, a3
; RV32I-NEXT:    or a0, a0, a3
; RV32I-NEXT:    sra a1, a1, a2
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sra_redundant_mask_zeros_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 4
; RV64I-NEXT:    sra a0, a0, a1
; RV64I-NEXT:    ret
; i64 variant: the shl by 4 forces bits 3:0 to zero, so masking with 48
; (0b110000) covers all 6 shift-amount bits of an i64 and is dropped on
; RV64; RV32 still needs the mask inside its expanded 64-bit shift sequence.
  %1 = shl i64 %b, 4
  %2 = and i64 %1, 48
  %3 = ashr i64 %a, %2
  ret i64 %3
}