; xref: /llvm-project/llvm/test/CodeGen/AArch64/sshl_sat.ll (revision db158c7c830807caeeb0691739c41f1d522029e9)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s

declare i16 @llvm.sshl.sat.i16(i16, i16)
declare <4 x i16> @llvm.sshl.sat.v4i16(<4 x i16>, <4 x i16>)

; fold (shlsat undef, x) -> 0
; The saturating shift of an undef LHS folds to a constant zero.
define i16 @combine_shl_undef(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, wzr
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.sshl.sat.i16(i16 undef, i16 %y)
  ret i16 %tmp
}
16
; fold (shlsat x, undef) -> undef
; An undef shift amount makes the whole result undef; no code is emitted.
define i16 @combine_shl_by_undef(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_by_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.sshl.sat.i16(i16 %x, i16 undef)
  ret i16 %tmp
}
25
; fold (shlsat poison, x) -> 0
; Same fold as the undef-LHS case above, but with a poison operand.
define i16 @combine_shl_poison(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_poison:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, wzr
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.sshl.sat.i16(i16 poison, i16 %y)
  ret i16 %tmp
}
35
; fold (shlsat x, poison) -> undef
; A poison shift amount collapses the result; no code is emitted.
define i16 @combine_shl_by_poison(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_by_poison:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.sshl.sat.i16(i16 %x, i16 poison)
  ret i16 %tmp
}
44
; fold (shlsat x, bitwidth) -> undef
; Shifting an i16 by 16 (its full bit width) is undefined, so this folds away.
define i16 @combine_shl_by_bitwidth(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_by_bitwidth:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.sshl.sat.i16(i16 %x, i16 16)
  ret i16 %tmp
}
53
; fold (shlsat 0, x) -> 0
; Shifting zero left always yields zero, regardless of the (variable) amount.
define i16 @combine_shl_zero(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, wzr
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.sshl.sat.i16(i16 0, i16 %y)
  ret i16 %tmp
}
63
; fold (shlsat x, 0) -> x
; A zero shift amount is an identity; %x is already in w0, so only a ret remains.
define i16 @combine_shlsat_by_zero(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shlsat_by_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.sshl.sat.i16(i16 %x, i16 0)
  ret i16 %tmp
}
72
; fold (shlsat c1, c2) -> c3
; Non-saturating constant case: 8 << 2 = 32 fits in i16.
define i16 @combine_shlsat_constfold(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shlsat_constfold:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, #32 // =0x20
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.sshl.sat.i16(i16 8, i16 2)
  ret i16 %tmp
}
82
; fold (shlsat c1, c2) -> sat max
; 8 << 15 overflows a positive i16, so the result clamps to 0x7fff (signed max).
define i16 @combine_shlsat_satmax(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shlsat_satmax:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, #32767 // =0x7fff
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.sshl.sat.i16(i16 8, i16 15)
  ret i16 %tmp
}
92
; fold (shlsat c1, c2) -> sat min
; -8 << 15 overflows a negative i16, so the result clamps to 0x8000 (signed min).
define i16 @combine_shlsat_satmin(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shlsat_satmin:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, #32768 // =0x8000
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.sshl.sat.i16(i16 -8, i16 15)
  ret i16 %tmp
}
102
declare void @sink4xi16(i16, i16, i16, i16)

; fold (shlsat c1, c2) -> c3 , c1/c2/c3 being vectors
; Covers all four scalar cases above in one vector op:
; lane 0 = no saturation (32), lane 1 = sat max, lane 2 = no saturation (-32),
; lane 3 = sat min.
define void @combine_shlsat_vector() nounwind {
; CHECK-LABEL: combine_shlsat_vector:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    mov w0, #32 // =0x20
; CHECK-NEXT:    mov w1, #32767 // =0x7fff
; CHECK-NEXT:    mov w2, #65504 // =0xffe0
; CHECK-NEXT:    mov w3, #32768 // =0x8000
; CHECK-NEXT:    bl sink4xi16
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %tmp = call <4 x i16> @llvm.sshl.sat.v4i16(
                          <4 x i16><i16 8, i16 8, i16 -8, i16 -8>,
                          <4 x i16><i16 2, i16 15, i16 2, i16 15>)
  ; Pass elements as arguments in a call to get CHECK statements that verify
  ; the constant folding.
  %e0 = extractelement <4 x i16> %tmp, i16 0
  %e1 = extractelement <4 x i16> %tmp, i16 1
  %e2 = extractelement <4 x i16> %tmp, i16 2
  %e3 = extractelement <4 x i16> %tmp, i16 3
  call void @sink4xi16(i16 %e0, i16 %e1, i16 %e2, i16 %e3)
  ret void
}
129
; Fold shlsat -> shl, if known not to saturate.
; The ashr by 2 leaves enough known sign bits that shifting left by 2 cannot
; overflow, so the saturating shift becomes a plain shl (here folded with the
; ashr into a mask of the low two bits).
define i16 @combine_shlsat_to_shl(i16 %x) nounwind {
; CHECK-LABEL: combine_shlsat_to_shl:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w0, w0, #0xfffffffc
; CHECK-NEXT:    ret
  %x2 = ashr i16 %x, 2
  %tmp = call i16 @llvm.sshl.sat.i16(i16 %x2, i16 2)
  ret i16 %tmp
}
140
; Do not fold shlsat -> shl.
; Shifting left by 3 after an ashr by 2 may still overflow, so the full
; saturating sequence (shift, compare, conditional select of the clamp value)
; must be emitted.
define i16 @combine_shlsat_to_shl_no_fold(i16 %x) nounwind {
; CHECK-LABEL: combine_shlsat_to_shl_no_fold:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxth w8, w0
; CHECK-NEXT:    mov w9, #-65536 // =0xffff0000
; CHECK-NEXT:    mov w10, #-2147483648 // =0x80000000
; CHECK-NEXT:    ands w8, w9, w8, lsl #14
; CHECK-NEXT:    cinv w10, w10, ge
; CHECK-NEXT:    lsl w9, w8, #3
; CHECK-NEXT:    cmp w8, w9, asr #3
; CHECK-NEXT:    csel w8, w10, w9, ne
; CHECK-NEXT:    asr w0, w8, #16
; CHECK-NEXT:    ret
  %x2 = ashr i16 %x, 2
  %tmp = call i16 @llvm.sshl.sat.i16(i16 %x2, i16 3)
  ret i16 %tmp
}
159
; Fold shlsat -> shl, if known not to saturate.
; Vector variant: the sext from i8 guarantees 8 sign bits in each i16 lane, so
; a left shift by 7 cannot overflow and the saturating shift lowers to a plain
; vector shl.
define <4 x i16> @combine_shlsat_to_shl_vec(<4 x i8> %a) nounwind {
; CHECK-LABEL: combine_shlsat_to_shl_vec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    shl v0.4h, v0.4h, #8
; CHECK-NEXT:    sshr v0.4h, v0.4h, #8
; CHECK-NEXT:    shl v0.4h, v0.4h, #7
; CHECK-NEXT:    ret
  %sext = sext <4 x i8> %a to <4 x i16>
  %tmp = call <4 x i16> @llvm.sshl.sat.v4i16(
                          <4 x i16> %sext,
                          <4 x i16> <i16 7, i16 7, i16 7, i16 7>)
  ret <4 x i16> %tmp
}