xref: /llvm-project/llvm/test/Transforms/InstSimplify/rem.ll (revision 38fffa630ee80163dc65e759392ad29798905679)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s

; A zero dividend always folds: 0 urem anything is 0.
define i32 @zero_dividend(i32 %A) {
; CHECK-LABEL: @zero_dividend(
; CHECK-NEXT:    ret i32 0
;
  %B = urem i32 0, %A
  ret i32 %B
}

; Vector form: an all-zero dividend folds to zeroinitializer for srem too.
define <2 x i32> @zero_dividend_vector(<2 x i32> %A) {
; CHECK-LABEL: @zero_dividend_vector(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %B = srem <2 x i32> zeroinitializer, %A
  ret <2 x i32> %B
}

; A poison element in an otherwise-zero dividend still folds to zero.
define <2 x i32> @zero_dividend_vector_poison_elt(<2 x i32> %A) {
; CHECK-LABEL: @zero_dividend_vector_poison_elt(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %B = urem <2 x i32> <i32 poison, i32 0>, %A
  ret <2 x i32> %B
}

; Division-by-zero is poison. UB in any vector lane means the whole op is poison.

; Constant folding: the lane dividing by zero becomes poison; the other lane folds.
define <2 x i8> @srem_zero_elt_vec_constfold(<2 x i8> %x) {
; CHECK-LABEL: @srem_zero_elt_vec_constfold(
; CHECK-NEXT:    ret <2 x i8> <i8 poison, i8 2>
;
  %rem = srem <2 x i8> <i8 1, i8 2>, <i8 0, i8 -42>
  ret <2 x i8> %rem
}

define <2 x i8> @urem_zero_elt_vec_constfold(<2 x i8> %x) {
; CHECK-LABEL: @urem_zero_elt_vec_constfold(
; CHECK-NEXT:    ret <2 x i8> <i8 1, i8 poison>
;
  %rem = urem <2 x i8> <i8 1, i8 2>, <i8 42, i8 0>
  ret <2 x i8> %rem
}

; With a variable dividend, a zero divisor element blocks simplification
; (the CHECK lines expect the rem to remain).
define <2 x i8> @srem_zero_elt_vec(<2 x i8> %x) {
; CHECK-LABEL: @srem_zero_elt_vec(
; CHECK-NEXT:    [[REM:%.*]] = srem <2 x i8> [[X:%.*]], <i8 -42, i8 0>
; CHECK-NEXT:    ret <2 x i8> [[REM]]
;
  %rem = srem <2 x i8> %x, <i8 -42, i8 0>
  ret <2 x i8> %rem
}

define <2 x i8> @urem_zero_elt_vec(<2 x i8> %x) {
; CHECK-LABEL: @urem_zero_elt_vec(
; CHECK-NEXT:    [[REM:%.*]] = urem <2 x i8> [[X:%.*]], <i8 0, i8 42>
; CHECK-NEXT:    ret <2 x i8> [[REM]]
;
  %rem = urem <2 x i8> %x, <i8 0, i8 42>
  ret <2 x i8> %rem
}

; An undef divisor element likewise blocks simplification of the rem.
define <2 x i8> @srem_undef_elt_vec(<2 x i8> %x) {
; CHECK-LABEL: @srem_undef_elt_vec(
; CHECK-NEXT:    [[REM:%.*]] = srem <2 x i8> [[X:%.*]], <i8 -42, i8 undef>
; CHECK-NEXT:    ret <2 x i8> [[REM]]
;
  %rem = srem <2 x i8> %x, <i8 -42, i8 undef>
  ret <2 x i8> %rem
}

define <2 x i8> @urem_undef_elt_vec(<2 x i8> %x) {
; CHECK-LABEL: @urem_undef_elt_vec(
; CHECK-NEXT:    [[REM:%.*]] = urem <2 x i8> [[X:%.*]], <i8 undef, i8 42>
; CHECK-NEXT:    ret <2 x i8> [[REM]]
;
  %rem = urem <2 x i8> %x, <i8 undef, i8 42>
  ret <2 x i8> %rem
}

; Division-by-zero is undef. UB in any vector lane means the whole op is undef.
; Thus, we can simplify this: if any element of 'y' is 0, we can do anything.
; Therefore, assume that all elements of 'y' must be 1.

define <2 x i1> @srem_bool_vec(<2 x i1> %x, <2 x i1> %y) {
; CHECK-LABEL: @srem_bool_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %rem = srem <2 x i1> %x, %y
  ret <2 x i1> %rem
}

define <2 x i1> @urem_bool_vec(<2 x i1> %x, <2 x i1> %y) {
; CHECK-LABEL: @urem_bool_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %rem = urem <2 x i1> %x, %y
  ret <2 x i1> %rem
}

; A zext'd i1 divisor is 0 or 1; 0 would be UB, so it must be 1, and rem by 1 is 0.
define <2 x i32> @zext_bool_urem_divisor_vec(<2 x i1> %x, <2 x i32> %y) {
; CHECK-LABEL: @zext_bool_urem_divisor_vec(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %ext = zext <2 x i1> %x to <2 x i32>
  %r = urem <2 x i32> %y, %ext
  ret <2 x i32> %r
}

define i32 @zext_bool_srem_divisor(i1 %x, i32 %y) {
; CHECK-LABEL: @zext_bool_srem_divisor(
; CHECK-NEXT:    ret i32 0
;
  %ext = zext i1 %x to i32
  %r = srem i32 %y, %ext
  ret i32 %r
}

; Divisor is either %x or 1: x rem x is 0 and x rem 1 is 0, so the result is 0.
define i32 @select1(i32 %x, i1 %b) {
; CHECK-LABEL: @select1(
; CHECK-NEXT:    ret i32 0
;
  %rhs = select i1 %b, i32 %x, i32 1
  %rem = srem i32 %x, %rhs
  ret i32 %rem
}

define i32 @select2(i32 %x, i1 %b) {
; CHECK-LABEL: @select2(
; CHECK-NEXT:    ret i32 0
;
  %rhs = select i1 %b, i32 %x, i32 1
  %rem = urem i32 %x, %rhs
  ret i32 %rem
}

; Taking the remainder twice by the same divisor is idempotent: (x % n) % n == x % n.
define i32 @rem1(i32 %x, i32 %n) {
; CHECK-LABEL: @rem1(
; CHECK-NEXT:    [[MOD:%.*]] = srem i32 [[X:%.*]], [[N:%.*]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %mod = srem i32 %x, %n
  %mod1 = srem i32 %mod, %n
  ret i32 %mod1
}

define i32 @rem2(i32 %x, i32 %n) {
; CHECK-LABEL: @rem2(
; CHECK-NEXT:    [[MOD:%.*]] = urem i32 [[X:%.*]], [[N:%.*]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %mod = urem i32 %x, %n
  %mod1 = urem i32 %mod, %n
  ret i32 %mod1
}

; Mixed signedness (srem then urem) is not simplified.
define i32 @rem3(i32 %x, i32 %n) {
; CHECK-LABEL: @rem3(
; CHECK-NEXT:    [[MOD:%.*]] = srem i32 [[X:%.*]], [[N:%.*]]
; CHECK-NEXT:    [[MOD1:%.*]] = urem i32 [[MOD]], [[N]]
; CHECK-NEXT:    ret i32 [[MOD1]]
;
  %mod = srem i32 %x, %n
  %mod1 = urem i32 %mod, %n
  ret i32 %mod1
}

; Masking with 250 bounds the dividend below 251, so the urem is an identity.
define i32 @urem_dividend_known_smaller_than_constant_divisor(i32 %x) {
; CHECK-LABEL: @urem_dividend_known_smaller_than_constant_divisor(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 250
; CHECK-NEXT:    ret i32 [[AND]]
;
  %and = and i32 %x, 250
  %r = urem i32 %and, 251
  ret i32 %r
}

; Negative test: the masked dividend can equal 251, so no fold.
define i32 @not_urem_dividend_known_smaller_than_constant_divisor(i32 %x) {
; CHECK-LABEL: @not_urem_dividend_known_smaller_than_constant_divisor(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 251
; CHECK-NEXT:    [[R:%.*]] = urem i32 [[AND]], 251
; CHECK-NEXT:    ret i32 [[R]]
;
  %and = and i32 %x, 251
  %r = urem i32 %and, 251
  ret i32 %r
}

; The xor result is 12 or 13, always below the divisor 14, so the urem folds away.
define i8 @urem_dividend_known_smaller_than_constant_divisor2(i1 %b) {
; CHECK-LABEL: @urem_dividend_known_smaller_than_constant_divisor2(
; CHECK-NEXT:    [[T0:%.*]] = zext i1 [[B:%.*]] to i8
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[T0]], 12
; CHECK-NEXT:    ret i8 [[XOR]]
;
  %t0 = zext i1 %b to i8
  %xor = xor i8 %t0, 12
  %r = urem i8 %xor, 14
  ret i8 %r
}

; negative test - dividend can equal 13

define i8 @not_urem_dividend_known_smaller_than_constant_divisor2(i1 %b) {
; CHECK-LABEL: @not_urem_dividend_known_smaller_than_constant_divisor2(
; CHECK-NEXT:    [[T0:%.*]] = zext i1 [[B:%.*]] to i8
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[T0]], 12
; CHECK-NEXT:    [[R:%.*]] = urem i8 [[XOR]], 13
; CHECK-NEXT:    ret i8 [[R]]
;
  %t0 = zext i1 %b to i8
  %xor = xor i8 %t0, 12
  %r = urem i8 %xor, 13
  ret i8 %r
}

; The or forces the divisor to be at least 251 > 250, so 250 urem divisor is 250.
define i32 @urem_constant_dividend_known_smaller_than_divisor(i32 %x) {
; CHECK-LABEL: @urem_constant_dividend_known_smaller_than_divisor(
; CHECK-NEXT:    ret i32 250
;
  %or = or i32 %x, 251
  %r = urem i32 250, %or
  ret i32 %r
}

; Negative test: the divisor can equal the dividend (251), so no fold.
define i32 @not_urem_constant_dividend_known_smaller_than_divisor(i32 %x) {
; CHECK-LABEL: @not_urem_constant_dividend_known_smaller_than_divisor(
; CHECK-NEXT:    [[OR:%.*]] = or i32 [[X:%.*]], 251
; CHECK-NEXT:    [[R:%.*]] = urem i32 251, [[OR]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %or = or i32 %x, 251
  %r = urem i32 251, %or
  ret i32 %r
}

; This would require computing known bits on both x and y. Is it worth doing?

define i32 @urem_dividend_known_smaller_than_divisor(i32 %x, i32 %y) {
; CHECK-LABEL: @urem_dividend_known_smaller_than_divisor(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 250
; CHECK-NEXT:    [[OR:%.*]] = or i32 [[Y:%.*]], 251
; CHECK-NEXT:    [[R:%.*]] = urem i32 [[AND]], [[OR]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %and = and i32 %x, 250
  %or = or i32 %y, 251
  %r = urem i32 %and, %or
  ret i32 %r
}

define i32 @not_urem_dividend_known_smaller_than_divisor(i32 %x, i32 %y) {
; CHECK-LABEL: @not_urem_dividend_known_smaller_than_divisor(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 251
; CHECK-NEXT:    [[OR:%.*]] = or i32 [[Y:%.*]], 251
; CHECK-NEXT:    [[R:%.*]] = urem i32 [[AND]], [[OR]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %and = and i32 %x, 251
  %or = or i32 %y, 251
  %r = urem i32 %and, %or
  ret i32 %r
}

declare i32 @external()

; The !range metadata bounds the call result to [0, 3), so urem by 3 is an identity.
define i32 @rem4() {
; CHECK-LABEL: @rem4(
; CHECK-NEXT:    [[CALL:%.*]] = call i32 @external(), !range [[RNG0:![0-9]+]]
; CHECK-NEXT:    ret i32 [[CALL]]
;
  %call = call i32 @external() , !range !0
  %urem = urem i32 %call, 3
  ret i32 %urem
}

!0 = !{i32 0, i32 3}

; shl nsw of x is a (non-wrapping) multiple of x, so srem by x is 0.
define i32 @rem5(i32 %x, i32 %y) {
; CHECK-LABEL: @rem5(
; CHECK-NEXT:    ret i32 0
;
  %shl = shl nsw i32 %x, %y
  %mod = srem i32 %shl, %x
  ret i32 %mod
}

define <2 x i32> @rem6(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @rem6(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %shl = shl nsw <2 x i32> %x, %y
  %mod = srem <2 x i32> %shl, %x
  ret <2 x i32> %mod
}

; make sure the previous fold doesn't take place for wrapped shifts

define i32 @rem7(i32 %x, i32 %y) {
; CHECK-LABEL: @rem7(
; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MOD:%.*]] = srem i32 [[SHL]], [[X]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %shl = shl i32 %x, %y
  %mod = srem i32 %shl, %x
  ret i32 %mod
}

; Unsigned counterpart: shl nuw of x is a multiple of x, so urem by x is 0.
define i32 @rem8(i32 %x, i32 %y) {
; CHECK-LABEL: @rem8(
; CHECK-NEXT:    ret i32 0
;
  %shl = shl nuw i32 %x, %y
  %mod = urem i32 %shl, %x
  ret i32 %mod
}

define <2 x i32> @rem9(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @rem9(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %shl = shl nuw <2 x i32> %x, %y
  %mod = urem <2 x i32> %shl, %x
  ret <2 x i32> %mod
}

; make sure the previous fold doesn't take place for wrapped shifts

define i32 @rem10(i32 %x, i32 %y) {
; CHECK-LABEL: @rem10(
; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MOD:%.*]] = urem i32 [[SHL]], [[X]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %shl = shl i32 %x, %y
  %mod = urem i32 %shl, %x
  ret i32 %mod
}

; A sext'd i1 divisor is 0 or -1; 0 would be UB and srem by -1 is 0, so the result is 0.
define i32 @srem_with_sext_bool_divisor(i1 %x, i32 %y) {
; CHECK-LABEL: @srem_with_sext_bool_divisor(
; CHECK-NEXT:    ret i32 0
;
  %s = sext i1 %x to i32
  %r = srem i32 %y, %s
  ret i32 %r
}

define <2 x i32> @srem_with_sext_bool_divisor_vec(<2 x i1> %x, <2 x i32> %y) {
; CHECK-LABEL: @srem_with_sext_bool_divisor_vec(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %s = sext <2 x i1> %x to <2 x i32>
  %r = srem <2 x i32> %y, %s
  ret <2 x i32> %r
}

; srem i8 -128, -1 overflows (result would be +128), so it folds to poison.
define i8 @srem_minusone_divisor() {
; CHECK-LABEL: @srem_minusone_divisor(
; CHECK-NEXT:    ret i8 poison
;
  %v = srem i8 -128, -1
  ret i8 %v
}

; (x *nsw y) srem y is 0: the nsw product is an exact multiple of y.
define i32 @srem_of_mul_nsw(i32 %x, i32 %y) {
; CHECK-LABEL: @srem_of_mul_nsw(
; CHECK-NEXT:    ret i32 0
;
  %mul = mul nsw i32 %x, %y
  %mod = srem i32 %mul, %y
  ret i32 %mod
}

; Verify that the optimization kicks in for:
;   - Y * X % Y as well as X * Y % Y
;   - vector types
define <2 x i32> @srem_of_mul_nsw_vec_commuted(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @srem_of_mul_nsw_vec_commuted(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %mul = mul nsw <2 x i32> %y, %x
  %mod = srem <2 x i32> %mul, %y
  ret <2 x i32> %mod
}

; Negative tests: nuw (wrong flag for srem) or no flag blocks the fold.
define i32 @srem_of_mul_nuw(i32 %x, i32 %y) {
; CHECK-LABEL: @srem_of_mul_nuw(
; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MOD:%.*]] = srem i32 [[MUL]], [[Y]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %mul = mul nuw i32 %x, %y
  %mod = srem i32 %mul, %y
  ret i32 %mod
}

define i32 @srem_of_mul(i32 %x, i32 %y) {
; CHECK-LABEL: @srem_of_mul(
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MOD:%.*]] = srem i32 [[MUL]], [[Y]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %mul = mul i32 %x, %y
  %mod = srem i32 %mul, %y
  ret i32 %mod
}

; For urem the required flag is nuw: nsw alone does not allow the fold.
define i32 @urem_of_mul_nsw(i32 %x, i32 %y) {
; CHECK-LABEL: @urem_of_mul_nsw(
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MOD:%.*]] = urem i32 [[MUL]], [[Y]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %mul = mul nsw i32 %x, %y
  %mod = urem i32 %mul, %y
  ret i32 %mod
}

define i32 @urem_of_mul_nuw(i32 %x, i32 %y) {
; CHECK-LABEL: @urem_of_mul_nuw(
; CHECK-NEXT:    ret i32 0
;
  %mul = mul nuw i32 %x, %y
  %mod = urem i32 %mul, %y
  ret i32 %mod
}

define <2 x i32> @srem_of_mul_nuw_vec_commuted(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @srem_of_mul_nuw_vec_commuted(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %mul = mul nuw <2 x i32> %y, %x
  %mod = urem <2 x i32> %mul, %y
  ret <2 x i32> %mod
}

define i32 @urem_of_mul(i32 %x, i32 %y) {
; CHECK-LABEL: @urem_of_mul(
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MOD:%.*]] = urem i32 [[MUL]], [[Y]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %mul = mul i32 %x, %y
  %mod = urem i32 %mul, %y
  ret i32 %mod
}

; (x sdiv y) * y rounds toward x with the same signedness, so srem by y is 0.
define i4 @srem_mul_sdiv(i4 %x, i4 %y) {
; CHECK-LABEL: @srem_mul_sdiv(
; CHECK-NEXT:    ret i4 0
;
  %d = sdiv i4 %x, %y
  %mul = mul i4 %d, %y
  %mod = srem i4 %mul, %y
  ret i4 %mod
}

; Negative test: udiv feeding srem mixes signedness, so no fold.
define i8 @srem_mul_udiv(i8 %x, i8 %y) {
; CHECK-LABEL: @srem_mul_udiv(
; CHECK-NEXT:    [[D:%.*]] = udiv i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MUL:%.*]] = mul i8 [[D]], [[Y]]
; CHECK-NEXT:    [[MOD:%.*]] = srem i8 [[MUL]], [[Y]]
; CHECK-NEXT:    ret i8 [[MOD]]
;
  %d = udiv i8 %x, %y
  %mul = mul i8 %d, %y
  %mod = srem i8 %mul, %y
  ret i8 %mod
}

define <3 x i7> @urem_mul_udiv_vec_commuted(<3 x i7> %x, <3 x i7> %y) {
; CHECK-LABEL: @urem_mul_udiv_vec_commuted(
; CHECK-NEXT:    ret <3 x i7> zeroinitializer
;
  %d = udiv <3 x i7> %x, %y
  %mul = mul <3 x i7> %y, %d
  %mod = urem <3 x i7> %mul, %y
  ret <3 x i7> %mod
}

; Negative test: sdiv feeding urem mixes signedness, so no fold.
define i8 @urem_mul_sdiv(i8 %x, i8 %y) {
; CHECK-LABEL: @urem_mul_sdiv(
; CHECK-NEXT:    [[D:%.*]] = sdiv i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MUL:%.*]] = mul i8 [[Y]], [[D]]
; CHECK-NEXT:    [[MOD:%.*]] = urem i8 [[MUL]], [[Y]]
; CHECK-NEXT:    ret i8 [[MOD]]
;
  %d = sdiv i8 %x, %y
  %mul = mul i8 %y, %d
  %mod = urem i8 %mul, %y
  ret i8 %mod
}

; A no-wrap multiple of a divisible constant (20 and 10 are both multiples of 5)
; makes the remainder 0. (Note: "simplfy" typo is kept to match the CHECK-LABELs.)
define <2 x i8> @simplfy_srem_of_mul(<2 x i8> %x) {
; CHECK-LABEL: @simplfy_srem_of_mul(
; CHECK-NEXT:    ret <2 x i8> zeroinitializer
;
  %mul = mul nsw <2 x i8> %x, <i8 20, i8 10>
  %r = srem <2 x i8> %mul, <i8 5, i8 5>
  ret <2 x i8> %r
}

; Negative test: 11 is not a multiple of 5, so one lane blocks the fold.
define <2 x i8> @simplfy_srem_of_mul_fail_bad_mod(<2 x i8> %x) {
; CHECK-LABEL: @simplfy_srem_of_mul_fail_bad_mod(
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw <2 x i8> [[X:%.*]], <i8 20, i8 11>
; CHECK-NEXT:    [[R:%.*]] = srem <2 x i8> [[MUL]], splat (i8 5)
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %mul = mul nsw <2 x i8> %x, <i8 20, i8 11>
  %r = srem <2 x i8> %mul, <i8 5, i8 5>
  ret <2 x i8> %r
}

define i8 @simplfy_urem_of_mul(i8 %x) {
; CHECK-LABEL: @simplfy_urem_of_mul(
; CHECK-NEXT:    ret i8 0
;
  %mul = mul nuw i8 %x, 30
  %r = urem i8 %mul, 10
  ret i8 %r
}

; Negative test: urem needs nuw on the mul; nsw is insufficient.
define i8 @simplfy_urem_of_mul_fail_bad_flag(i8 %x) {
; CHECK-LABEL: @simplfy_urem_of_mul_fail_bad_flag(
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i8 [[X:%.*]], 30
; CHECK-NEXT:    [[R:%.*]] = urem i8 [[MUL]], 10
; CHECK-NEXT:    ret i8 [[R]]
;
  %mul = mul nsw i8 %x, 30
  %r = urem i8 %mul, 10
  ret i8 %r
}

; Negative test: 31 is not a multiple of 10, so no fold.
define i8 @simplfy_urem_of_mul_fail_bad_mod(i8 %x) {
; CHECK-LABEL: @simplfy_urem_of_mul_fail_bad_mod(
; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i8 [[X:%.*]], 31
; CHECK-NEXT:    [[R:%.*]] = urem i8 [[MUL]], 10
; CHECK-NEXT:    ret i8 [[R]]
;
  %mul = mul nuw i8 %x, 31
  %r = urem i8 %mul, 10
  ret i8 %r
}
