// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -ffixed-point -triple x86_64-unknown-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,SIGNED
// RUN: %clang_cc1 -ffixed-point -triple x86_64-unknown-linux-gnu -fpadding-on-unsigned-fixed-point -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,UNSIGNED
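//
// The two RUN lines cover both unsigned fixed-point layouts: by default
// (SIGNED prefix) unsigned types use every bit for the value, so they carry
// one more fractional bit than their signed counterparts and mixed
// signed/unsigned operations need widened intermediates; with
// -fpadding-on-unsigned-fixed-point (UNSIGNED prefix) the unsigned types
// reserve a padding bit and share the signed scale, which is why those
// checks expect narrower arithmetic.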
short _Fract shf;
_Accum a;
unsigned _Fract uf;
unsigned long _Accum ula;

_Sat short _Fract sshf;
_Sat _Accum sa;
_Sat unsigned _Fract suf;
_Sat unsigned long _Accum sula;

int i;
unsigned int u;
signed char c;

float fl;


// CHECK-LABEL: @add_shfa(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr @shf, align 1
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// CHECK-NEXT:    [[TMP2:%.*]] = add i32 [[UPSCALE]], [[TMP0]]
// CHECK-NEXT:    [[DOWNSCALE:%.*]] = ashr i32 [[TMP2]], 8
// CHECK-NEXT:    [[RESIZE1:%.*]] = trunc i32 [[DOWNSCALE]] to i8
// CHECK-NEXT:    store i8 [[RESIZE1]], ptr @shf, align 1
// CHECK-NEXT:    ret void
//
void add_shfa(void) {
  shf += a;
}

// SIGNED-LABEL: @add_auf(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// SIGNED-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP1]] to i33
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i33 [[RESIZE]], 1
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP0]] to i33
// SIGNED-NEXT:    [[TMP2:%.*]] = add i33 [[UPSCALE]], [[RESIZE1]]
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i33 [[TMP2]], 1
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i33 [[DOWNSCALE]] to i32
// SIGNED-NEXT:    store i32 [[RESIZE2]], ptr @a, align 4
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @add_auf(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
// UNSIGNED-NEXT:    [[TMP2:%.*]] = add i32 [[TMP1]], [[RESIZE]]
// UNSIGNED-NEXT:    store i32 [[TMP2]], ptr @a, align 4
// UNSIGNED-NEXT:    ret void
//
void add_auf(void) {
  a += uf;
}

// CHECK-LABEL: @add_ufula(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @ula, align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr @uf, align 2
// CHECK-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP1]] to i64
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i64 [[RESIZE]], 16
// CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[UPSCALE]], [[TMP0]]
// CHECK-NEXT:    [[DOWNSCALE:%.*]] = lshr i64 [[TMP2]], 16
// CHECK-NEXT:    [[RESIZE1:%.*]] = trunc i64 [[DOWNSCALE]] to i16
// CHECK-NEXT:    store i16 [[RESIZE1]], ptr @uf, align 2
// CHECK-NEXT:    ret void
//
void add_ufula(void) {
  uf += ula;
}

// SIGNED-LABEL: @add_ulashf(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i8, ptr @shf, align 1
// SIGNED-NEXT:    [[TMP1:%.*]] = load i64, ptr @ula, align 8
// SIGNED-NEXT:    [[RESIZE:%.*]] = zext i64 [[TMP1]] to i65
// SIGNED-NEXT:    [[RESIZE1:%.*]] = sext i8 [[TMP0]] to i65
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i65 [[RESIZE1]], 25
// SIGNED-NEXT:    [[TMP2:%.*]] = add i65 [[RESIZE]], [[UPSCALE]]
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i65 [[TMP2]], 1
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i65 [[DOWNSCALE]] to i64
// SIGNED-NEXT:    [[UPSCALE3:%.*]] = shl i64 [[RESIZE2]], 1
// SIGNED-NEXT:    store i64 [[UPSCALE3]], ptr @ula, align 8
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @add_ulashf(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i8, ptr @shf, align 1
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i64, ptr @ula, align 8
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = sext i8 [[TMP0]] to i64
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i64 [[RESIZE]], 24
// UNSIGNED-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], [[UPSCALE]]
// UNSIGNED-NEXT:    store i64 [[TMP2]], ptr @ula, align 8
// UNSIGNED-NEXT:    ret void
//
void add_ulashf(void) {
  ula += shf;
}

// SIGNED-LABEL: @add_ufshf(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i8, ptr @shf, align 1
// SIGNED-NEXT:    [[TMP1:%.*]] = load i16, ptr @uf, align 2
// SIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP1]] to i17
// SIGNED-NEXT:    [[RESIZE1:%.*]] = sext i8 [[TMP0]] to i17
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i17 [[RESIZE1]], 9
// SIGNED-NEXT:    [[TMP2:%.*]] = add i17 [[RESIZE]], [[UPSCALE]]
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i17 [[TMP2]], 1
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i17 [[DOWNSCALE]] to i16
// SIGNED-NEXT:    [[UPSCALE3:%.*]] = shl i16 [[RESIZE2]], 1
// SIGNED-NEXT:    store i16 [[UPSCALE3]], ptr @uf, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @add_ufshf(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i8, ptr @shf, align 1
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, ptr @uf, align 2
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = sext i8 [[TMP0]] to i16
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i16 [[RESIZE]], 8
// UNSIGNED-NEXT:    [[TMP2:%.*]] = add i16 [[TMP1]], [[UPSCALE]]
// UNSIGNED-NEXT:    store i16 [[TMP2]], ptr @uf, align 2
// UNSIGNED-NEXT:    ret void
//
void add_ufshf(void) {
  uf += shf;
}

// CHECK-LABEL: @add_ashf(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr @shf, align 1
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i8 [[TMP0]] to i32
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// CHECK-NEXT:    [[TMP2:%.*]] = add i32 [[TMP1]], [[UPSCALE]]
// CHECK-NEXT:    store i32 [[TMP2]], ptr @a, align 4
// CHECK-NEXT:    ret void
//
void add_ashf(void) {
  a += shf;
}

// CHECK-LABEL: @add_ai(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @i, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP1]] to i47
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i47 [[RESIZE1]], 15
// CHECK-NEXT:    [[TMP2:%.*]] = add i47 [[RESIZE]], [[UPSCALE]]
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i47 [[TMP2]] to i32
// CHECK-NEXT:    store i32 [[RESIZE2]], ptr @a, align 4
// CHECK-NEXT:    ret void
//
void add_ai(void) {
  a += i;
}

// CHECK-LABEL: @add_au(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @u, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP1]] to i48
// CHECK-NEXT:    [[RESIZE1:%.*]] = zext i32 [[TMP0]] to i48
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i48 [[RESIZE1]], 15
// CHECK-NEXT:    [[TMP2:%.*]] = add i48 [[RESIZE]], [[UPSCALE]]
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i48 [[TMP2]] to i32
// CHECK-NEXT:    store i32 [[RESIZE2]], ptr @a, align 4
// CHECK-NEXT:    ret void
//
void add_au(void) {
  a += u;
}

// SIGNED-LABEL: @add_ulai(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i32, ptr @i, align 4
// SIGNED-NEXT:    [[TMP1:%.*]] = load i64, ptr @ula, align 8
// SIGNED-NEXT:    [[RESIZE:%.*]] = zext i64 [[TMP1]] to i65
// SIGNED-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP0]] to i65
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i65 [[RESIZE1]], 32
// SIGNED-NEXT:    [[TMP2:%.*]] = add i65 [[RESIZE]], [[UPSCALE]]
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i65 [[TMP2]] to i64
// SIGNED-NEXT:    store i64 [[RESIZE2]], ptr @ula, align 8
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @add_ulai(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i32, ptr @i, align 4
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i64, ptr @ula, align 8
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP0]] to i64
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i64 [[RESIZE]], 31
// UNSIGNED-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], [[UPSCALE]]
// UNSIGNED-NEXT:    store i64 [[TMP2]], ptr @ula, align 8
// UNSIGNED-NEXT:    ret void
//
void add_ulai(void) {
  ula += i;
}

// SIGNED-LABEL: @add_iula(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i64, ptr @ula, align 8
// SIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4
// SIGNED-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP1]] to i65
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i65 [[RESIZE]], 32
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i64 [[TMP0]] to i65
// SIGNED-NEXT:    [[TMP2:%.*]] = add i65 [[UPSCALE]], [[RESIZE1]]
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i65 [[TMP2]] to i64
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = lshr i64 [[RESIZE2]], 32
// SIGNED-NEXT:    [[RESIZE3:%.*]] = trunc i64 [[DOWNSCALE]] to i32
// SIGNED-NEXT:    store i32 [[RESIZE3]], ptr @i, align 4
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @add_iula(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i64, ptr @ula, align 8
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP1]] to i64
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i64 [[RESIZE]], 31
// UNSIGNED-NEXT:    [[TMP2:%.*]] = add i64 [[UPSCALE]], [[TMP0]]
// UNSIGNED-NEXT:    [[DOWNSCALE:%.*]] = lshr i64 [[TMP2]], 31
// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i64 [[DOWNSCALE]] to i32
// UNSIGNED-NEXT:    store i32 [[RESIZE1]], ptr @i, align 4
// UNSIGNED-NEXT:    ret void
//
void add_iula(void) {
  i += ula;
}

// CHECK-LABEL: @add_ca(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr @c, align 1
// CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[CONV]] to i47
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i47 [[RESIZE]], 15
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT:    [[TMP2:%.*]] = add i47 [[UPSCALE]], [[RESIZE1]]
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i47 [[TMP2]] to i32
// CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i32 [[RESIZE2]], 0
// CHECK-NEXT:    [[TMP4:%.*]] = add i32 [[RESIZE2]], 32767
// CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP3]], i32 [[TMP4]], i32 [[RESIZE2]]
// CHECK-NEXT:    [[DOWNSCALE:%.*]] = ashr i32 [[TMP5]], 15
// CHECK-NEXT:    [[RESIZE3:%.*]] = trunc i32 [[DOWNSCALE]] to i8
// CHECK-NEXT:    store i8 [[RESIZE3]], ptr @c, align 1
// CHECK-NEXT:    ret void
//
void add_ca(void) {
  c += a;
}

// CHECK-LABEL: @add_sai(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @i, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @sa, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP1]] to i47
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i47 [[RESIZE1]], 15
// CHECK-NEXT:    [[TMP2:%.*]] = call i47 @llvm.sadd.sat.i47(i47 [[RESIZE]], i47 [[UPSCALE]])
// CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i47 [[TMP2]], 2147483647
// CHECK-NEXT:    [[SATMAX:%.*]] = select i1 [[TMP3]], i47 2147483647, i47 [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i47 [[SATMAX]], -2147483648
// CHECK-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP4]], i47 -2147483648, i47 [[SATMAX]]
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i47 [[SATMIN]] to i32
// CHECK-NEXT:    store i32 [[RESIZE2]], ptr @sa, align 4
// CHECK-NEXT:    ret void
//
void add_sai(void) {
  sa += i;
}

// CHECK-LABEL: @add_csa(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @sa, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr @c, align 1
// CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[CONV]] to i47
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i47 [[RESIZE]], 15
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT:    [[TMP2:%.*]] = call i47 @llvm.sadd.sat.i47(i47 [[UPSCALE]], i47 [[RESIZE1]])
// CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i47 [[TMP2]], 2147483647
// CHECK-NEXT:    [[SATMAX:%.*]] = select i1 [[TMP3]], i47 2147483647, i47 [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i47 [[SATMAX]], -2147483648
// CHECK-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP4]], i47 -2147483648, i47 [[SATMAX]]
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i47 [[SATMIN]] to i32
// CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[RESIZE2]], 0
// CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[RESIZE2]], 32767
// CHECK-NEXT:    [[TMP7:%.*]] = select i1 [[TMP5]], i32 [[TMP6]], i32 [[RESIZE2]]
// CHECK-NEXT:    [[DOWNSCALE:%.*]] = ashr i32 [[TMP7]], 15
// CHECK-NEXT:    [[RESIZE3:%.*]] = trunc i32 [[DOWNSCALE]] to i8
// CHECK-NEXT:    store i8 [[RESIZE3]], ptr @c, align 1
// CHECK-NEXT:    ret void
//
void add_csa(void) {
  c += sa;
}

// SIGNED-LABEL: @add_sulau(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i32, ptr @u, align 4
// SIGNED-NEXT:    [[TMP1:%.*]] = load i64, ptr @sula, align 8
// SIGNED-NEXT:    [[RESIZE:%.*]] = zext i32 [[TMP0]] to i64
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i64 [[RESIZE]], 32
// SIGNED-NEXT:    [[TMP2:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[TMP1]], i64 [[UPSCALE]])
// SIGNED-NEXT:    store i64 [[TMP2]], ptr @sula, align 8
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @add_sulau(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i32, ptr @u, align 4
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i64, ptr @sula, align 8
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i64 [[TMP1]] to i63
// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i32 [[TMP0]] to i63
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i63 [[RESIZE1]], 31
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i63 @llvm.uadd.sat.i63(i63 [[RESIZE]], i63 [[UPSCALE]])
// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i63 [[TMP2]] to i64
// UNSIGNED-NEXT:    store i64 [[RESIZE2]], ptr @sula, align 8
// UNSIGNED-NEXT:    ret void
//
void add_sulau(void) {
  sula += u;
}

// SIGNED-LABEL: @add_sshsuf(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @suf, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i8, ptr @sshf, align 1
// SIGNED-NEXT:    [[RESIZE:%.*]] = sext i8 [[TMP1]] to i17
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i17 [[RESIZE]], 9
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP0]] to i17
// SIGNED-NEXT:    [[TMP2:%.*]] = call i17 @llvm.sadd.sat.i17(i17 [[UPSCALE]], i17 [[RESIZE1]])
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i17 [[TMP2]], 1
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i17 [[DOWNSCALE]] to i16
// SIGNED-NEXT:    [[DOWNSCALE3:%.*]] = ashr i16 [[RESIZE2]], 8
// SIGNED-NEXT:    [[RESIZE4:%.*]] = trunc i16 [[DOWNSCALE3]] to i8
// SIGNED-NEXT:    store i8 [[RESIZE4]], ptr @sshf, align 1
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @add_sshsuf(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @suf, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i8, ptr @sshf, align 1
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = sext i8 [[TMP1]] to i16
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i16 [[RESIZE]], 8
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.sadd.sat.i16(i16 [[UPSCALE]], i16 [[TMP0]])
// UNSIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i16 [[TMP2]], 8
// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i16 [[DOWNSCALE]] to i8
// UNSIGNED-NEXT:    store i8 [[RESIZE1]], ptr @sshf, align 1
// UNSIGNED-NEXT:    ret void
//
void add_sshsuf(void) {
  sshf += suf;
}

// CHECK-LABEL: @add_afl(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr @fl, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[TMP2:%.*]] = sitofp i32 [[TMP1]] to float
// CHECK-NEXT:    [[TMP3:%.*]] = fmul float [[TMP2]], 0x3F00000000000000
// CHECK-NEXT:    [[ADD:%.*]] = fadd float [[TMP3]], [[TMP0]]
// CHECK-NEXT:    [[TMP4:%.*]] = fmul float [[ADD]], 3.276800e+04
// CHECK-NEXT:    [[TMP5:%.*]] = fptosi float [[TMP4]] to i32
// CHECK-NEXT:    store i32 [[TMP5]], ptr @a, align 4
// CHECK-NEXT:    ret void
//
void add_afl(void) {
  a += fl;
}

// CHECK-LABEL: @add_fla(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = sitofp i32 [[TMP0]] to float
// CHECK-NEXT:    [[TMP2:%.*]] = fmul float [[TMP1]], 0x3F00000000000000
// CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr @fl, align 4
// CHECK-NEXT:    [[ADD:%.*]] = fadd float [[TMP3]], [[TMP2]]
// CHECK-NEXT:    store float [[ADD]], ptr @fl, align 4
// CHECK-NEXT:    ret void
//
void add_fla(void) {
  fl += a;
}

// CHECK-LABEL: @add_safl(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr @fl, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @sa, align 4
// CHECK-NEXT:    [[TMP2:%.*]] = sitofp i32 [[TMP1]] to float
// CHECK-NEXT:    [[TMP3:%.*]] = fmul float [[TMP2]], 0x3F00000000000000
// CHECK-NEXT:    [[ADD:%.*]] = fadd float [[TMP3]], [[TMP0]]
// CHECK-NEXT:    [[TMP4:%.*]] = fmul float [[ADD]], 3.276800e+04
// CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.fptosi.sat.i32.f32(float [[TMP4]])
// CHECK-NEXT:    store i32 [[TMP5]], ptr @sa, align 4
// CHECK-NEXT:    ret void
//
void add_safl(void) {
  sa += fl;
}

// CHECK-LABEL: @add_flsa(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @sa, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = sitofp i32 [[TMP0]] to float
// CHECK-NEXT:    [[TMP2:%.*]] = fmul float [[TMP1]], 0x3F00000000000000
// CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr @fl, align 4
// CHECK-NEXT:    [[ADD:%.*]] = fadd float [[TMP3]], [[TMP2]]
// CHECK-NEXT:    store float [[ADD]], ptr @fl, align 4
// CHECK-NEXT:    ret void
//
void add_flsa(void) {
  fl += sa;
}

// Subtraction, multiplication and division should work about the same, so
// just make sure we can do them.

// SIGNED-LABEL: @sub_auf(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// SIGNED-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP1]] to i33
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i33 [[RESIZE]], 1
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP0]] to i33
// SIGNED-NEXT:    [[TMP2:%.*]] = sub i33 [[UPSCALE]], [[RESIZE1]]
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i33 [[TMP2]], 1
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i33 [[DOWNSCALE]] to i32
// SIGNED-NEXT:    store i32 [[RESIZE2]], ptr @a, align 4
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @sub_auf(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
// UNSIGNED-NEXT:    [[TMP2:%.*]] = sub i32 [[TMP1]], [[RESIZE]]
// UNSIGNED-NEXT:    store i32 [[TMP2]], ptr @a, align 4
// UNSIGNED-NEXT:    ret void
//
void sub_auf(void) {
  a -= uf;
}

// CHECK-LABEL: @sub_ai(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @i, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP1]] to i47
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i47 [[RESIZE1]], 15
// CHECK-NEXT:    [[TMP2:%.*]] = sub i47 [[RESIZE]], [[UPSCALE]]
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i47 [[TMP2]] to i32
// CHECK-NEXT:    store i32 [[RESIZE2]], ptr @a, align 4
// CHECK-NEXT:    ret void
//
void sub_ai(void) {
  a -= i;
}

// CHECK-LABEL: @sub_csa(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @sa, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr @c, align 1
// CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[CONV]] to i47
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i47 [[RESIZE]], 15
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT:    [[TMP2:%.*]] = call i47 @llvm.ssub.sat.i47(i47 [[UPSCALE]], i47 [[RESIZE1]])
// CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i47 [[TMP2]], 2147483647
// CHECK-NEXT:    [[SATMAX:%.*]] = select i1 [[TMP3]], i47 2147483647, i47 [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i47 [[SATMAX]], -2147483648
// CHECK-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP4]], i47 -2147483648, i47 [[SATMAX]]
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i47 [[SATMIN]] to i32
// CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[RESIZE2]], 0
// CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[RESIZE2]], 32767
// CHECK-NEXT:    [[TMP7:%.*]] = select i1 [[TMP5]], i32 [[TMP6]], i32 [[RESIZE2]]
// CHECK-NEXT:    [[DOWNSCALE:%.*]] = ashr i32 [[TMP7]], 15
// CHECK-NEXT:    [[RESIZE3:%.*]] = trunc i32 [[DOWNSCALE]] to i8
// CHECK-NEXT:    store i8 [[RESIZE3]], ptr @c, align 1
// CHECK-NEXT:    ret void
//
void sub_csa(void) {
  c -= sa;
}

// CHECK-LABEL: @sub_afl(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr @fl, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[TMP2:%.*]] = sitofp i32 [[TMP1]] to float
// CHECK-NEXT:    [[TMP3:%.*]] = fmul float [[TMP2]], 0x3F00000000000000
// CHECK-NEXT:    [[SUB:%.*]] = fsub float [[TMP3]], [[TMP0]]
// CHECK-NEXT:    [[TMP4:%.*]] = fmul float [[SUB]], 3.276800e+04
// CHECK-NEXT:    [[TMP5:%.*]] = fptosi float [[TMP4]] to i32
// CHECK-NEXT:    store i32 [[TMP5]], ptr @a, align 4
// CHECK-NEXT:    ret void
//
void sub_afl(void) {
  a -= fl;
}


// SIGNED-LABEL: @mul_auf(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// SIGNED-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP1]] to i33
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i33 [[RESIZE]], 1
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP0]] to i33
// SIGNED-NEXT:    [[TMP2:%.*]] = call i33 @llvm.smul.fix.i33(i33 [[UPSCALE]], i33 [[RESIZE1]], i32 16)
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i33 [[TMP2]], 1
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i33 [[DOWNSCALE]] to i32
// SIGNED-NEXT:    store i32 [[RESIZE2]], ptr @a, align 4
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @mul_auf(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP1]], i32 [[RESIZE]], i32 15)
// UNSIGNED-NEXT:    store i32 [[TMP2]], ptr @a, align 4
// UNSIGNED-NEXT:    ret void
//
void mul_auf(void) {
  a *= uf;
}

// CHECK-LABEL: @mul_ai(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @i, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP1]] to i47
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i47 [[RESIZE1]], 15
// CHECK-NEXT:    [[TMP2:%.*]] = call i47 @llvm.smul.fix.i47(i47 [[RESIZE]], i47 [[UPSCALE]], i32 15)
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i47 [[TMP2]] to i32
// CHECK-NEXT:    store i32 [[RESIZE2]], ptr @a, align 4
// CHECK-NEXT:    ret void
//
void mul_ai(void) {
  a *= i;
}

// CHECK-LABEL: @mul_csa(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @sa, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr @c, align 1
// CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[CONV]] to i47
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i47 [[RESIZE]], 15
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT:    [[TMP2:%.*]] = call i47 @llvm.smul.fix.sat.i47(i47 [[UPSCALE]], i47 [[RESIZE1]], i32 15)
// CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i47 [[TMP2]], 2147483647
// CHECK-NEXT:    [[SATMAX:%.*]] = select i1 [[TMP3]], i47 2147483647, i47 [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i47 [[SATMAX]], -2147483648
// CHECK-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP4]], i47 -2147483648, i47 [[SATMAX]]
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i47 [[SATMIN]] to i32
// CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[RESIZE2]], 0
// CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[RESIZE2]], 32767
// CHECK-NEXT:    [[TMP7:%.*]] = select i1 [[TMP5]], i32 [[TMP6]], i32 [[RESIZE2]]
// CHECK-NEXT:    [[DOWNSCALE:%.*]] = ashr i32 [[TMP7]], 15
// CHECK-NEXT:    [[RESIZE3:%.*]] = trunc i32 [[DOWNSCALE]] to i8
// CHECK-NEXT:    store i8 [[RESIZE3]], ptr @c, align 1
// CHECK-NEXT:    ret void
//
void mul_csa(void) {
  c *= sa;
}

// CHECK-LABEL: @mul_afl(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr @fl, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[TMP2:%.*]] = sitofp i32 [[TMP1]] to float
// CHECK-NEXT:    [[TMP3:%.*]] = fmul float [[TMP2]], 0x3F00000000000000
// CHECK-NEXT:    [[MUL:%.*]] = fmul float [[TMP3]], [[TMP0]]
// CHECK-NEXT:    [[TMP4:%.*]] = fmul float [[MUL]], 3.276800e+04
// CHECK-NEXT:    [[TMP5:%.*]] = fptosi float [[TMP4]] to i32
// CHECK-NEXT:    store i32 [[TMP5]], ptr @a, align 4
// CHECK-NEXT:    ret void
//
void mul_afl(void) {
  a *= fl;
}


// SIGNED-LABEL: @div_auf(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// SIGNED-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP1]] to i33
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i33 [[RESIZE]], 1
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP0]] to i33
// SIGNED-NEXT:    [[TMP2:%.*]] = call i33 @llvm.sdiv.fix.i33(i33 [[UPSCALE]], i33 [[RESIZE1]], i32 16)
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i33 [[TMP2]], 1
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i33 [[DOWNSCALE]] to i32
// SIGNED-NEXT:    store i32 [[RESIZE2]], ptr @a, align 4
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @div_auf(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.sdiv.fix.i32(i32 [[TMP1]], i32 [[RESIZE]], i32 15)
// UNSIGNED-NEXT:    store i32 [[TMP2]], ptr @a, align 4
// UNSIGNED-NEXT:    ret void
//
void div_auf(void) {
  a /= uf;
}

// CHECK-LABEL: @div_ai(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @i, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP1]] to i47
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i47 [[RESIZE1]], 15
// CHECK-NEXT:    [[TMP2:%.*]] = call i47 @llvm.sdiv.fix.i47(i47 [[RESIZE]], i47 [[UPSCALE]], i32 15)
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i47 [[TMP2]] to i32
// CHECK-NEXT:    store i32 [[RESIZE2]], ptr @a, align 4
// CHECK-NEXT:    ret void
//
void div_ai(void) {
  a /= i;
}

// CHECK-LABEL: @div_csa(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @sa, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr @c, align 1
// CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[CONV]] to i47
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i47 [[RESIZE]], 15
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT:    [[TMP2:%.*]] = call i47 @llvm.sdiv.fix.sat.i47(i47 [[UPSCALE]], i47 [[RESIZE1]], i32 15)
// CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i47 [[TMP2]], 2147483647
// CHECK-NEXT:    [[SATMAX:%.*]] = select i1 [[TMP3]], i47 2147483647, i47 [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i47 [[SATMAX]], -2147483648
// CHECK-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP4]], i47 -2147483648, i47 [[SATMAX]]
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i47 [[SATMIN]] to i32
// CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[RESIZE2]], 0
// CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[RESIZE2]], 32767
// CHECK-NEXT:    [[TMP7:%.*]] = select i1 [[TMP5]], i32 [[TMP6]], i32 [[RESIZE2]]
// CHECK-NEXT:    [[DOWNSCALE:%.*]] = ashr i32 [[TMP7]], 15
// CHECK-NEXT:    [[RESIZE3:%.*]] = trunc i32 [[DOWNSCALE]] to i8
// CHECK-NEXT:    store i8 [[RESIZE3]], ptr @c, align 1
// CHECK-NEXT:    ret void
//
void div_csa(void) {
  c /= sa;
}

// CHECK-LABEL: @div_afl(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr @fl, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[TMP2:%.*]] = sitofp i32 [[TMP1]] to float
// CHECK-NEXT:    [[TMP3:%.*]] = fmul float [[TMP2]], 0x3F00000000000000
// CHECK-NEXT:    [[DIV:%.*]] = fdiv float [[TMP3]], [[TMP0]]
// CHECK-NEXT:    [[TMP4:%.*]] = fmul float [[DIV]], 3.276800e+04
// CHECK-NEXT:    [[TMP5:%.*]] = fptosi float [[TMP4]] to i32
// CHECK-NEXT:    store i32 [[TMP5]], ptr @a, align 4
// CHECK-NEXT:    ret void
//
void div_afl(void) {
  a /= fl;
}


// CHECK-LABEL: @shft_ai(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @i, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], [[TMP0]]
// CHECK-NEXT:    store i32 [[TMP2]], ptr @a, align 4
// CHECK-NEXT:    ret void
//
void shft_ai(void) {
  a <<= i;
}

// SIGNED-LABEL: @shft_sufi(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i32, ptr @i, align 4
// SIGNED-NEXT:    [[TMP1:%.*]] = load i16, ptr @suf, align 2
// SIGNED-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP0]] to i16
// SIGNED-NEXT:    [[TMP3:%.*]] = call i16 @llvm.ushl.sat.i16(i16 [[TMP1]], i16 [[TMP2]])
// SIGNED-NEXT:    store i16 [[TMP3]], ptr @suf, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @shft_sufi(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i32, ptr @i, align 4
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, ptr @suf, align 2
// UNSIGNED-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP0]] to i16
// UNSIGNED-NEXT:    [[TMP3:%.*]] = call i16 @llvm.sshl.sat.i16(i16 [[TMP1]], i16 [[TMP2]])
// UNSIGNED-NEXT:    store i16 [[TMP3]], ptr @suf, align 2
// UNSIGNED-NEXT:    ret void
//
void shft_sufi(void) {
  suf <<= i;
}

// CHECK-LABEL: @shft_ulai(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @i, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @ula, align 8
// CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP0]] to i64
// CHECK-NEXT:    [[TMP3:%.*]] = lshr i64 [[TMP1]], [[TMP2]]
// CHECK-NEXT:    store i64 [[TMP3]], ptr @ula, align 8
// CHECK-NEXT:    ret void
//
void shft_ulai(void) {
  ula >>= i;
}