; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -codegenprepare -S < %s | FileCheck %s
; RUN: opt -enable-debugify -codegenprepare -S < %s 2>&1 | FileCheck %s -check-prefix=DEBUG

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"

; add + (icmp ult sum, operand) folds to @llvm.uadd.with.overflow.
define i64 @uaddo1(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo1(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %a
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

; Same fold when the compare is against the other add operand.
define i64 @uaddo2(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo2(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

; Commuted compare (ugt operand, sum) still matches.
define i64 @uaddo3(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo3(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

; The compare's use (the select) is in a dominated block; the intrinsic
; is formed next to that use.
define i64 @uaddo4(i64 %a, i64 %b, i1 %c) nounwind ssp {
; CHECK-LABEL: @uaddo4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[C:%.*]], label [[NEXT:%.*]], label [[EXIT:%.*]]
; CHECK:       next:
; CHECK-NEXT:    [[TMP0:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
; CHECK:       exit:
; CHECK-NEXT:    ret i64 0
;
entry:
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  br i1 %c, label %next, label %exit

next:
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q

exit:
  ret i64 0
}

; The add has another use (the store), so no intrinsic is formed; the
; compare is rebuilt at its use block instead.
define i64 @uaddo5(i64 %a, i64 %b, i64* %ptr, i1 %c) nounwind ssp {
; CHECK-LABEL: @uaddo5(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = add i64 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    store i64 [[ADD]], i64* [[PTR:%.*]]
; CHECK-NEXT:    br i1 [[C:%.*]], label [[NEXT:%.*]], label [[EXIT:%.*]]
; CHECK:       next:
; CHECK-NEXT:    [[TMP0:%.*]] = icmp ugt i64 [[B]], [[ADD]]
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[TMP0]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
; CHECK:       exit:
; CHECK-NEXT:    ret i64 0
;
entry:
  %add = add i64 %b, %a
  store i64 %add, i64* %ptr
  %cmp = icmp ugt i64 %b, %add
  br i1 %c, label %next, label %exit

next:
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q

exit:
  ret i64 0
}

; When adding 1, the general pattern for add-overflow may be different due to icmp canonicalization.
; PR31754: https://bugs.llvm.org/show_bug.cgi?id=31754

define i1 @uaddo_i64_increment(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_increment(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %a = add i64 %x, 1
  %ov = icmp eq i64 %a, 0
  store i64 %a, i64* %p
  ret i1 %ov
}

; As above with the add operands commuted.
define i1 @uaddo_i8_increment_noncanonical_1(i8 %x, i8* %p) {
; CHECK-LABEL: @uaddo_i8_increment_noncanonical_1(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 1, i8 [[X:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i8 [[MATH]], i8* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %a = add i8 1, %x        ; commute
  %ov = icmp eq i8 %a, 0
  store i8 %a, i8* %p
  ret i1 %ov
}

; As above with the icmp operands commuted.
define i1 @uaddo_i32_increment_noncanonical_2(i32 %x, i32* %p) {
; CHECK-LABEL: @uaddo_i32_increment_noncanonical_2(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i32 [[MATH]], i32* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %a = add i32 %x, 1
  %ov = icmp eq i32 0, %a   ; commute
  store i32 %a, i32* %p
  ret i1 %ov
}

; As above with both the add and the icmp commuted.
define i1 @uaddo_i16_increment_noncanonical_3(i16 %x, i16* %p) {
; CHECK-LABEL: @uaddo_i16_increment_noncanonical_3(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 1, i16 [[X:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i16 [[MATH]], i16* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %a = add i16 1, %x        ; commute
  %ov = icmp eq i16 0, %a   ; commute
  store i16 %a, i16* %p
  ret i1 %ov
}

; The overflow check may be against the input rather than the sum.

define i1 @uaddo_i64_increment_alt(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_increment_alt(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %a = add i64 %x, 1
  store i64 %a, i64* %p
  %ov = icmp eq i64 %x, -1
  ret i1 %ov
}

; Make sure insertion is done correctly based on dominance.

define i1 @uaddo_i64_increment_alt_dom(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_increment_alt_dom(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %ov = icmp eq i64 %x, -1
  %a = add i64 %x, 1
  store i64 %a, i64* %p
  ret i1 %ov
}

; The overflow check may be against the input rather than the sum.

define i1 @uaddo_i64_decrement_alt(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_decrement_alt(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 -1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %a = add i64 %x, -1
  store i64 %a, i64* %p
  %ov = icmp ne i64 %x, 0
  ret i1 %ov
}

; Make sure insertion is done correctly based on dominance.

define i1 @uaddo_i64_decrement_alt_dom(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_decrement_alt_dom(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 -1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %ov = icmp ne i64 %x, 0
  %a = add i64 %x, -1
  store i64 %a, i64* %p
  ret i1 %ov
}

; No transform for illegal types.

define i1 @uaddo_i42_increment_illegal_type(i42 %x, i42* %p) {
; CHECK-LABEL: @uaddo_i42_increment_illegal_type(
; CHECK-NEXT:    [[A:%.*]] = add i42 [[X:%.*]], 1
; CHECK-NEXT:    [[OV:%.*]] = icmp eq i42 [[A]], 0
; CHECK-NEXT:    store i42 [[A]], i42* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV]]
;
  %a = add i42 %x, 1
  %ov = icmp eq i42 %a, 0
  store i42 %a, i42* %p
  ret i1 %ov
}

; sub + (icmp ult of the operands) folds to @llvm.usub.with.overflow.
define i1 @usubo_ult_i64(i64 %x, i64 %y, i64* %p) {
; CHECK-LABEL: @usubo_ult_i64(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[X:%.*]], i64 [[Y:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = sub i64 %x, %y
  store i64 %s, i64* %p
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

; Verify insertion point for single-BB. Toggle predicate.

define i1 @usubo_ugt_i32(i32 %x, i32 %y, i32* %p) {
; CHECK-LABEL: @usubo_ugt_i32(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i32 [[MATH]], i32* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %ov = icmp ugt i32 %y, %x
  %s = sub i32 %x, %y
  store i32 %s, i32* %p
  ret i1 %ov
}

; Constant operand should match.

define i1 @usubo_ugt_constant_op0_i8(i8 %x, i8* %p) {
; CHECK-LABEL: @usubo_ugt_constant_op0_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 42, i8 [[X:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i8 [[MATH]], i8* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = sub i8 42, %x
  %ov = icmp ugt i8 %x, 42
  store i8 %s, i8* %p
  ret i1 %ov
}

; Compare with constant operand 0 is canonicalized by commuting, but verify match for non-canonical form.

define i1 @usubo_ult_constant_op0_i16(i16 %x, i16* %p) {
; CHECK-LABEL: @usubo_ult_constant_op0_i16(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 43, i16 [[X:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i16 [[MATH]], i16* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = sub i16 43, %x
  %ov = icmp ult i16 43, %x
  store i16 %s, i16* %p
  ret i1 %ov
}

; Subtract with constant operand 1 is canonicalized to add.

define i1 @usubo_ult_constant_op1_i16(i16 %x, i16* %p) {
; CHECK-LABEL: @usubo_ult_constant_op1_i16(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 [[X:%.*]], i16 44)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i16 [[MATH]], i16* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = add i16 %x, -44
  %ov = icmp ult i16 %x, 44
  store i16 %s, i16* %p
  ret i1 %ov
}

; Same fold with the compare ahead of the math and the predicate toggled.
define i1 @usubo_ugt_constant_op1_i8(i8 %x, i8* %p) {
; CHECK-LABEL: @usubo_ugt_constant_op1_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[X:%.*]], i8 45)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i8 [[MATH]], i8* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %ov = icmp ugt i8 45, %x
  %s = add i8 %x, -45
  store i8 %s, i8* %p
  ret i1 %ov
}

; Special-case: subtract 1 changes the compare predicate and constant.

define i1 @usubo_eq_constant1_op1_i32(i32 %x, i32* %p) {
; CHECK-LABEL: @usubo_eq_constant1_op1_i32(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[X:%.*]], i32 1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i32 [[MATH]], i32* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = add i32 %x, -1
  %ov = icmp eq i32 %x, 0
  store i32 %s, i32* %p
  ret i1 %ov
}

; Special-case: subtract from 0 (negate) changes the compare predicate.

define i1 @usubo_ne_constant0_op1_i32(i32 %x, i32* %p) {
; CHECK-LABEL: @usubo_ne_constant0_op1_i32(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 0, i32 [[X:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i32 [[MATH]], i32* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = sub i32 0, %x
  %ov = icmp ne i32 %x, 0
  store i32 %s, i32* %p
  ret i1 %ov
}

; Verify insertion point for multi-BB.

declare void @call(i1)

; The sub dominates the compare; the intrinsic is formed where the sub was.
define i1 @usubo_ult_sub_dominates_i64(i64 %x, i64 %y, i64* %p, i1 %cond) {
; CHECK-LABEL: @usubo_ult_sub_dominates_i64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
; CHECK:       t:
; CHECK-NEXT:    [[TMP0:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[X:%.*]], i64 [[Y:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
; CHECK-NEXT:    store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT:    br i1 [[COND]], label [[END:%.*]], label [[F]]
; CHECK:       f:
; CHECK-NEXT:    ret i1 [[COND]]
; CHECK:       end:
; CHECK-NEXT:    ret i1 [[OV1]]
;
entry:
  br i1 %cond, label %t, label %f

t:
  %s = sub i64 %x, %y
  store i64 %s, i64* %p
  br i1 %cond, label %end, label %f

f:
  ret i1 %cond

end:
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

; The cmp dominates the sub and has other uses, so it stays in place; the
; intrinsic is formed next to the sub and replaces the dominated use.
define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, i64* %p, i1 %cond) {
; CHECK-LABEL: @usubo_ult_cmp_dominates_i64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
; CHECK:       t:
; CHECK-NEXT:    [[OV:%.*]] = icmp ult i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    call void @call(i1 [[OV]])
; CHECK-NEXT:    br i1 [[OV]], label [[END:%.*]], label [[F]]
; CHECK:       f:
; CHECK-NEXT:    ret i1 [[COND]]
; CHECK:       end:
; CHECK-NEXT:    [[TMP0:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[X]], i64 [[Y]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
; CHECK-NEXT:    store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
entry:
  br i1 %cond, label %t, label %f

t:
  %ov = icmp ult i64 %x, %y
  call void @call(i1 %ov)
  br i1 %ov, label %end, label %f

f:
  ret i1 %cond

end:
  %s = sub i64 %x, %y
  store i64 %s, i64* %p
  ret i1 %ov
}

; Verify that crazy/non-canonical code does not crash.

define void @bar() {
; CHECK-LABEL: @bar(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 1, -1
; CHECK-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
; CHECK-NEXT:    unreachable
;
  %cmp = icmp eq i64 1, -1
  %frombool = zext i1 %cmp to i8
  unreachable
}

; More non-canonical constant code that must not crash; it is left unchanged.
define void @foo() {
; CHECK-LABEL: @foo(
; CHECK-NEXT:    [[SUB:%.*]] = add nsw i64 1, 1
; CHECK-NEXT:    [[CONV:%.*]] = trunc i64 [[SUB]] to i32
; CHECK-NEXT:    unreachable
;
  %sub = add nsw i64 1, 1
  %conv = trunc i64 %sub to i32
  unreachable
}

; Similarly for usubo.

define i1 @bar2() {
; CHECK-LABEL: @bar2(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 1, 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %cmp = icmp eq i64 1, 0
  ret i1 %cmp
}

; Constant sub-as-add pattern; must not crash and is left unchanged.
define i64 @foo2(i8 *%p) {
; CHECK-LABEL: @foo2(
; CHECK-NEXT:    [[SUB:%.*]] = add nsw i64 1, -1
; CHECK-NEXT:    ret i64 [[SUB]]
;
  %sub = add nsw i64 1, -1
  ret i64 %sub
}

; Avoid hoisting a math op into a dominating block which would
; increase the critical path.

define void @PR41129(i64* %p64) {
; CHECK-LABEL: @PR41129(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[KEY:%.*]] = load i64, i64* [[P64:%.*]], align 8
; CHECK-NEXT:    [[COND17:%.*]] = icmp eq i64 [[KEY]], 0
; CHECK-NEXT:    br i1 [[COND17]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK:       false:
; CHECK-NEXT:    [[ANDVAL:%.*]] = and i64 [[KEY]], 7
; CHECK-NEXT:    store i64 [[ANDVAL]], i64* [[P64]]
; CHECK-NEXT:    br label [[EXIT:%.*]]
; CHECK:       true:
; CHECK-NEXT:    [[SVALUE:%.*]] = add i64 [[KEY]], -1
; CHECK-NEXT:    store i64 [[SVALUE]], i64* [[P64]]
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  %key = load i64, i64* %p64, align 8
  %cond17 = icmp eq i64 %key, 0
  br i1 %cond17, label %true, label %false

false:
  %andval = and i64 %key, 7
  store i64 %andval, i64* %p64
  br label %exit

true:
  %svalue = add i64 %key, -1
  store i64 %svalue, i64* %p64
  br label %exit

exit:
  ret void
}

; Check that every instruction inserted by -codegenprepare has a debug location.
; DEBUG: CheckModuleDebugify: PASS

