; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 -mattr=+d < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 -mattr=+d < %s | FileCheck %s --check-prefix=LA64

;; This test checks that unnecessary masking of shift-amount operands is
;; eliminated during instruction selection. It also ensures that the masking
;; is not removed when it may affect the shift amount.
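;; On LoongArch, the word shifts (sll.w/srl.w/sra.w) use only the low 5 bits
;; of the shift-amount register and the doubleword shifts (sll.d/srl.d/sra.d)
;; use only the low 6 bits, so a mask that cannot change those bits (e.g.
;; `and %b, 31` for an i32 shift) is redundant, while a mask that can clear
;; one of them (e.g. 15 or 32) must be kept.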

define i32 @sll_redundant_mask(i32 %a, i32 %b) {
; LA32-LABEL: sll_redundant_mask:
; LA32:       # %bb.0:
; LA32-NEXT:    sll.w $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: sll_redundant_mask:
; LA64:       # %bb.0:
; LA64-NEXT:    sll.w $a0, $a0, $a1
; LA64-NEXT:    ret
  %1 = and i32 %b, 31
  %2 = shl i32 %a, %1
  ret i32 %2
}

define i32 @sll_non_redundant_mask(i32 %a, i32 %b) {
; LA32-LABEL: sll_non_redundant_mask:
; LA32:       # %bb.0:
; LA32-NEXT:    andi $a1, $a1, 15
; LA32-NEXT:    sll.w $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: sll_non_redundant_mask:
; LA64:       # %bb.0:
; LA64-NEXT:    andi $a1, $a1, 15
; LA64-NEXT:    sll.w $a0, $a0, $a1
; LA64-NEXT:    ret
  %1 = and i32 %b, 15
  %2 = shl i32 %a, %1
  ret i32 %2
}

define i32 @srl_redundant_mask(i32 %a, i32 %b) {
; LA32-LABEL: srl_redundant_mask:
; LA32:       # %bb.0:
; LA32-NEXT:    srl.w $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: srl_redundant_mask:
; LA64:       # %bb.0:
; LA64-NEXT:    srl.w $a0, $a0, $a1
; LA64-NEXT:    ret
  %1 = and i32 %b, 4095
  %2 = lshr i32 %a, %1
  ret i32 %2
}

define i32 @srl_non_redundant_mask(i32 %a, i32 %b) {
; LA32-LABEL: srl_non_redundant_mask:
; LA32:       # %bb.0:
; LA32-NEXT:    andi $a1, $a1, 7
; LA32-NEXT:    srl.w $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: srl_non_redundant_mask:
; LA64:       # %bb.0:
; LA64-NEXT:    andi $a1, $a1, 7
; LA64-NEXT:    srl.w $a0, $a0, $a1
; LA64-NEXT:    ret
  %1 = and i32 %b, 7
  %2 = lshr i32 %a, %1
  ret i32 %2
}

define i32 @sra_redundant_mask(i32 %a, i32 %b) {
; LA32-LABEL: sra_redundant_mask:
; LA32:       # %bb.0:
; LA32-NEXT:    sra.w $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: sra_redundant_mask:
; LA64:       # %bb.0:
; LA64-NEXT:    sra.w $a0, $a0, $a1
; LA64-NEXT:    ret
  %1 = and i32 %b, 65535
  %2 = ashr i32 %a, %1
  ret i32 %2
}

define i32 @sra_non_redundant_mask(i32 %a, i32 %b) {
; LA32-LABEL: sra_non_redundant_mask:
; LA32:       # %bb.0:
; LA32-NEXT:    andi $a1, $a1, 32
; LA32-NEXT:    sra.w $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: sra_non_redundant_mask:
; LA64:       # %bb.0:
; LA64-NEXT:    andi $a1, $a1, 32
; LA64-NEXT:    sra.w $a0, $a0, $a1
; LA64-NEXT:    ret
  %1 = and i32 %b, 32
  %2 = ashr i32 %a, %1
  ret i32 %2
}

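;; In the *_redundant_mask_zeros tests the shift amount is produced by a left
;; shift, so its low bits are already known to be zero; combined with that,
;; the mask cannot change any bit the shift instruction reads and can be
;; dropped.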
define i32 @sll_redundant_mask_zeros(i32 %a, i32 %b) {
; LA32-LABEL: sll_redundant_mask_zeros:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a1, $a1, 1
; LA32-NEXT:    sll.w $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: sll_redundant_mask_zeros:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a1, 1
; LA64-NEXT:    sll.w $a0, $a0, $a1
; LA64-NEXT:    ret
  %1 = shl i32 %b, 1
  %2 = and i32 %1, 30
  %3 = shl i32 %a, %2
  ret i32 %3
}

define i32 @srl_redundant_mask_zeros(i32 %a, i32 %b) {
; LA32-LABEL: srl_redundant_mask_zeros:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a1, $a1, 2
; LA32-NEXT:    srl.w $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: srl_redundant_mask_zeros:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a1, 2
; LA64-NEXT:    srl.w $a0, $a0, $a1
; LA64-NEXT:    ret
  %1 = shl i32 %b, 2
  %2 = and i32 %1, 28
  %3 = lshr i32 %a, %2
  ret i32 %3
}

define i32 @sra_redundant_mask_zeros(i32 %a, i32 %b) {
; LA32-LABEL: sra_redundant_mask_zeros:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a1, $a1, 3
; LA32-NEXT:    sra.w $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: sra_redundant_mask_zeros:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a1, 3
; LA64-NEXT:    sra.w $a0, $a0, $a1
; LA64-NEXT:    ret
  %1 = shl i32 %b, 3
  %2 = and i32 %1, 24
  %3 = ashr i32 %a, %2
  ret i32 %3
}

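;; i64 variants: on LA64 the .d shifts read the low 6 bits of the shift
;; amount, so the mask is dropped there as well; on LA32 the i64 shift is
;; legalized into a multi-instruction word-pair sequence.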
define i64 @sll_redundant_mask_zeros_i64(i64 %a, i64 %b) {
; LA32-LABEL: sll_redundant_mask_zeros_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a2, 2
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:    srli.w $a3, $a0, 1
; LA32-NEXT:    andi $a4, $a2, 60
; LA32-NEXT:    xori $a5, $a4, 31
; LA32-NEXT:    srl.w $a3, $a3, $a5
; LA32-NEXT:    or $a1, $a1, $a3
; LA32-NEXT:    addi.w $a3, $a4, -32
; LA32-NEXT:    slti $a4, $a3, 0
; LA32-NEXT:    maskeqz $a1, $a1, $a4
; LA32-NEXT:    sll.w $a5, $a0, $a3
; LA32-NEXT:    masknez $a4, $a5, $a4
; LA32-NEXT:    or $a1, $a1, $a4
; LA32-NEXT:    sll.w $a0, $a0, $a2
; LA32-NEXT:    srai.w $a2, $a3, 31
; LA32-NEXT:    and $a0, $a2, $a0
; LA32-NEXT:    ret
;
; LA64-LABEL: sll_redundant_mask_zeros_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a1, 2
; LA64-NEXT:    sll.d $a0, $a0, $a1
; LA64-NEXT:    ret
  %1 = shl i64 %b, 2
  %2 = and i64 %1, 60
  %3 = shl i64 %a, %2
  ret i64 %3
}

define i64 @srl_redundant_mask_zeros_i64(i64 %a, i64 %b) {
; LA32-LABEL: srl_redundant_mask_zeros_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a2, 3
; LA32-NEXT:    srl.w $a0, $a0, $a2
; LA32-NEXT:    slli.w $a3, $a1, 1
; LA32-NEXT:    andi $a4, $a2, 56
; LA32-NEXT:    xori $a5, $a4, 31
; LA32-NEXT:    sll.w $a3, $a3, $a5
; LA32-NEXT:    or $a0, $a0, $a3
; LA32-NEXT:    addi.w $a3, $a4, -32
; LA32-NEXT:    slti $a4, $a3, 0
; LA32-NEXT:    maskeqz $a0, $a0, $a4
; LA32-NEXT:    srl.w $a5, $a1, $a3
; LA32-NEXT:    masknez $a4, $a5, $a4
; LA32-NEXT:    or $a0, $a0, $a4
; LA32-NEXT:    srl.w $a1, $a1, $a2
; LA32-NEXT:    srai.w $a2, $a3, 31
; LA32-NEXT:    and $a1, $a2, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: srl_redundant_mask_zeros_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a1, 3
; LA64-NEXT:    srl.d $a0, $a0, $a1
; LA64-NEXT:    ret
  %1 = shl i64 %b, 3
  %2 = and i64 %1, 56
  %3 = lshr i64 %a, %2
  ret i64 %3
}

define i64 @sra_redundant_mask_zeros_i64(i64 %a, i64 %b) {
; LA32-LABEL: sra_redundant_mask_zeros_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    srai.w $a3, $a1, 31
; LA32-NEXT:    slli.w $a4, $a2, 4
; LA32-NEXT:    andi $a5, $a4, 48
; LA32-NEXT:    addi.w $a6, $a5, -32
; LA32-NEXT:    slti $a7, $a6, 0
; LA32-NEXT:    masknez $a2, $a3, $a7
; LA32-NEXT:    sra.w $a3, $a1, $a4
; LA32-NEXT:    maskeqz $a3, $a3, $a7
; LA32-NEXT:    or $a2, $a3, $a2
; LA32-NEXT:    srl.w $a0, $a0, $a4
; LA32-NEXT:    slli.w $a3, $a1, 1
; LA32-NEXT:    xori $a4, $a5, 31
; LA32-NEXT:    sll.w $a3, $a3, $a4
; LA32-NEXT:    or $a0, $a0, $a3
; LA32-NEXT:    maskeqz $a0, $a0, $a7
; LA32-NEXT:    sra.w $a1, $a1, $a6
; LA32-NEXT:    masknez $a1, $a1, $a7
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    move $a1, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: sra_redundant_mask_zeros_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a1, 4
; LA64-NEXT:    sra.d $a0, $a0, $a1
; LA64-NEXT:    ret
  %1 = shl i64 %b, 4
  %2 = and i64 %1, 48
  %3 = ashr i64 %a, %2
  ret i64 %3
}