1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -passes=instcombine -S | FileCheck %s
3
; Fold
;   ((%x * %y) u/ %x) == %y
; to
;   @llvm.umul.with.overflow(%x, %y) + extractvalue + not
8
; Scalar base case: (x * y) u/ x == y is recognized as "the multiply did not
; overflow", so it folds to umul.with.overflow + extractvalue of the overflow
; bit + not (the CHECK lines show the mul/udiv/icmp are fully replaced).
define i1 @t0_basic(i8 %x, i8 %y) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT:    [[MUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y:%.*]])
; CHECK-NEXT:    [[MUL_OV:%.*]] = extractvalue { i8, i1 } [[MUL]], 1
; CHECK-NEXT:    [[MUL_NOT_OV:%.*]] = xor i1 [[MUL_OV]], true
; CHECK-NEXT:    ret i1 [[MUL_NOT_OV]]
;
  %t0 = mul i8 %x, %y
  %t1 = udiv i8 %t0, %x
  %r = icmp eq i8 %t1, %y
  ret i1 %r
}
21
; Vector version of t0_basic: the same fold applies elementwise to
; <2 x i8>, producing the v2i8 variant of the intrinsic and a splat-true xor.
define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @t1_vec(
; CHECK-NEXT:    [[MUL:%.*]] = call { <2 x i8>, <2 x i1> } @llvm.umul.with.overflow.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]])
; CHECK-NEXT:    [[MUL_OV:%.*]] = extractvalue { <2 x i8>, <2 x i1> } [[MUL]], 1
; CHECK-NEXT:    [[MUL_NOT_OV:%.*]] = xor <2 x i1> [[MUL_OV]], splat (i1 true)
; CHECK-NEXT:    ret <2 x i1> [[MUL_NOT_OV]]
;
  %t0 = mul <2 x i8> %x, %y
  %t1 = udiv <2 x i8> %t0, %x
  %r = icmp eq <2 x i8> %t1, %y
  ret <2 x i1> %r
}
34
35declare i8 @gen8()
36
; Commutativity: the mul operands are swapped (%y * %x instead of %x * %y);
; the fold must still fire. %y comes from a call so it cannot be constant-folded.
define i1 @t2_commutative(i8 %x) {
; CHECK-LABEL: @t2_commutative(
; CHECK-NEXT:    [[Y:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[MUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y]])
; CHECK-NEXT:    [[MUL_OV:%.*]] = extractvalue { i8, i1 } [[MUL]], 1
; CHECK-NEXT:    [[MUL_NOT_OV:%.*]] = xor i1 [[MUL_OV]], true
; CHECK-NEXT:    ret i1 [[MUL_NOT_OV]]
;
  %y = call i8 @gen8()
  %t0 = mul i8 %y, %x ; swapped
  %t1 = udiv i8 %t0, %x
  %r = icmp eq i8 %t1, %y
  ret i1 %r
}
51
; Commutativity: here only the icmp operands are swapped (%y == %t1) while the
; mul is in canonical order; the fold must still fire.
; NOTE(review): the original t3 was byte-identical to t2 (mul swapped, icmp
; canonical), duplicating that test and leaving the icmp-swapped-only form
; uncovered; given t2 (mul swapped) and t4 (both swapped), t3 is corrected to
; swap only the icmp. The CHECK lines are unchanged: the fold output is the same.
define i1 @t3_commutative(i8 %x) {
; CHECK-LABEL: @t3_commutative(
; CHECK-NEXT:    [[Y:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[MUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y]])
; CHECK-NEXT:    [[MUL_OV:%.*]] = extractvalue { i8, i1 } [[MUL]], 1
; CHECK-NEXT:    [[MUL_NOT_OV:%.*]] = xor i1 [[MUL_OV]], true
; CHECK-NEXT:    ret i1 [[MUL_NOT_OV]]
;
  %y = call i8 @gen8()
  %t0 = mul i8 %x, %y
  %t1 = udiv i8 %t0, %x
  %r = icmp eq i8 %y, %t1 ; swapped
  ret i1 %r
}
66
; Commutativity: both the mul operands AND the icmp operands are swapped;
; the fold must still fire.
define i1 @t4_commutative(i8 %x) {
; CHECK-LABEL: @t4_commutative(
; CHECK-NEXT:    [[Y:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[MUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y]])
; CHECK-NEXT:    [[MUL_OV:%.*]] = extractvalue { i8, i1 } [[MUL]], 1
; CHECK-NEXT:    [[MUL_NOT_OV:%.*]] = xor i1 [[MUL_OV]], true
; CHECK-NEXT:    ret i1 [[MUL_NOT_OV]]
;
  %y = call i8 @gen8()
  %t0 = mul i8 %y, %x ; swapped
  %t1 = udiv i8 %t0, %x
  %r = icmp eq i8 %y, %t1 ; swapped
  ret i1 %r
}
81
82; Extra-use tests
83
84declare void @use8(i8)
85
; Extra use of the mul result: the fold still fires — the mul is replaced by
; extractvalue 0 of the intrinsic result, which feeds the extra use (@use8).
define i1 @t5_extrause0(i8 %x, i8 %y) {
; CHECK-LABEL: @t5_extrause0(
; CHECK-NEXT:    [[MUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y:%.*]])
; CHECK-NEXT:    [[MUL_VAL:%.*]] = extractvalue { i8, i1 } [[MUL]], 0
; CHECK-NEXT:    [[MUL_OV:%.*]] = extractvalue { i8, i1 } [[MUL]], 1
; CHECK-NEXT:    [[MUL_NOT_OV:%.*]] = xor i1 [[MUL_OV]], true
; CHECK-NEXT:    call void @use8(i8 [[MUL_VAL]])
; CHECK-NEXT:    ret i1 [[MUL_NOT_OV]]
;
  %t0 = mul i8 %x, %y
  call void @use8(i8 %t0)
  %t1 = udiv i8 %t0, %x
  %r = icmp eq i8 %t1, %y
  ret i1 %r
}
101
; Extra use of the udiv result: the fold does NOT fire (the CHECK lines show
; the original mul/udiv/icmp are kept), since the udiv would have to remain.
define i1 @t6_extrause1(i8 %x, i8 %y) {
; CHECK-LABEL: @t6_extrause1(
; CHECK-NEXT:    [[T0:%.*]] = mul i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = udiv i8 [[T0]], [[X]]
; CHECK-NEXT:    call void @use8(i8 [[T1]])
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[T1]], [[Y]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = mul i8 %x, %y
  %t1 = udiv i8 %t0, %x
  call void @use8(i8 %t1)
  %r = icmp eq i8 %t1, %y
  ret i1 %r
}
116
; Extra uses of BOTH the mul and the udiv results: the fold does not fire
; (the extra use of the udiv alone already blocks it, as in t6).
define i1 @t7_extrause2(i8 %x, i8 %y) {
; CHECK-LABEL: @t7_extrause2(
; CHECK-NEXT:    [[T0:%.*]] = mul i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[T0]])
; CHECK-NEXT:    [[T1:%.*]] = udiv i8 [[T0]], [[X]]
; CHECK-NEXT:    call void @use8(i8 [[T1]])
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[T1]], [[Y]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = mul i8 %x, %y
  call void @use8(i8 %t0)
  %t1 = udiv i8 %t0, %x
  call void @use8(i8 %t1)
  %r = icmp eq i8 %t1, %y
  ret i1 %r
}
133
134; Negative tests
135
; Negative test: the udiv divisor (%x1) is not the same value as the mul
; operand (%x0), so the pattern does not match and nothing folds.
define i1 @n8_different_x(i8 %x0, i8 %x1, i8 %y) {
; CHECK-LABEL: @n8_different_x(
; CHECK-NEXT:    [[T0:%.*]] = mul i8 [[X0:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = udiv i8 [[T0]], [[X1:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[T1]], [[Y]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = mul i8 %x0, %y
  %t1 = udiv i8 %t0, %x1
  %r = icmp eq i8 %t1, %y
  ret i1 %r
}
148
; Negative test: the icmp compares against %y1, which is not the mul operand
; (%y0), so the pattern does not match and nothing folds.
define i1 @n9_different_y(i8 %x, i8 %y0, i8 %y1) {
; CHECK-LABEL: @n9_different_y(
; CHECK-NEXT:    [[T0:%.*]] = mul i8 [[X:%.*]], [[Y0:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = udiv i8 [[T0]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[T1]], [[Y1:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = mul i8 %x, %y0
  %t1 = udiv i8 %t0, %x
  %r = icmp eq i8 %t1, %y1
  ret i1 %r
}
161
; Negative test: the predicate is 'ult' rather than the equality compare the
; fold requires, so nothing folds.
define i1 @n10_wrong_pred(i8 %x, i8 %y) {
; CHECK-LABEL: @n10_wrong_pred(
; CHECK-NEXT:    [[T0:%.*]] = mul i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = udiv i8 [[T0]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[T1]], [[Y]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %t0 = mul i8 %x, %y
  %t1 = udiv i8 %t0, %x
  %r = icmp ult i8 %t1, %y
  ret i1 %r
}
174