; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+cmov | FileCheck %s --check-prefixes=CMOV
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=NOCMOV

; PR46809

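; Each test below computes an @llvm.*.with.overflow result, speculatively adds
; an immediate or a loaded value to it, and selects between the adjusted and
; unadjusted values on the overflow bit. The checks show the speculative add
; being emitted as an LEA (which leaves EFLAGS untouched) where possible, so
; the flags from the arithmetic op feed a CMOVcc directly; without CMOV, a
; conditional branch skips the add instead.

; Signed add + immediate: ADD sets OF, the +100 folds into an LEA, and CMOVNO
; keeps the unadjusted sum when there is no overflow.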
define i32 @sadd_add_imm(i32 %x, i32 %y) {
; X64-LABEL: sadd_add_imm:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    addl %esi, %edi
; X64-NEXT:    leal 100(%rdi), %eax
; X64-NEXT:    cmovnol %edi, %eax
; X64-NEXT:    retq
;
; CMOV-LABEL: sadd_add_imm:
; CMOV:       # %bb.0:
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    addl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    leal 100(%ecx), %eax
; CMOV-NEXT:    cmovnol %ecx, %eax
; CMOV-NEXT:    retl
;
; NOCMOV-LABEL: sadd_add_imm:
; NOCMOV:       # %bb.0:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    addl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    jno .LBB0_2
; NOCMOV-NEXT:  # %bb.1:
; NOCMOV-NEXT:    addl $100, %eax
; NOCMOV-NEXT:  .LBB0_2:
; NOCMOV-NEXT:    retl
  %o = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %a = add i32 %v2, 100
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

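; Signed add + loaded addend: the load cannot be folded into an LEA
; displacement, so both candidate values are materialized and CMOVO/CMOVNO
; (or a JNO branch) picks one.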
define i32 @sadd_add_load(i32 %x, i32 %y, ptr %pz) nounwind {
; X64-LABEL: sadd_add_load:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    leal (%rdi,%rsi), %eax
; X64-NEXT:    addl (%rdx), %eax
; X64-NEXT:    addl %esi, %edi
; X64-NEXT:    cmovnol %edi, %eax
; X64-NEXT:    retq
;
; CMOV-LABEL: sadd_add_load:
; CMOV:       # %bb.0:
; CMOV-NEXT:    pushl %esi
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
; CMOV-NEXT:    leal (%eax,%edx), %esi
; CMOV-NEXT:    addl (%ecx), %esi
; CMOV-NEXT:    addl %edx, %eax
; CMOV-NEXT:    cmovol %esi, %eax
; CMOV-NEXT:    popl %esi
; CMOV-NEXT:    retl
;
; NOCMOV-LABEL: sadd_add_load:
; NOCMOV:       # %bb.0:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
; NOCMOV-NEXT:    leal (%eax,%edx), %ecx
; NOCMOV-NEXT:    addl %edx, %eax
; NOCMOV-NEXT:    jno .LBB1_2
; NOCMOV-NEXT:  # %bb.1:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    addl (%eax), %ecx
; NOCMOV-NEXT:    movl %ecx, %eax
; NOCMOV-NEXT:  .LBB1_2:
; NOCMOV-NEXT:    retl
  %o = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %z = load i32, ptr %pz
  %a = add i32 %v2, %z
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

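; Unsigned add + immediate: unsigned overflow is reported in CF, so the select
; uses CMOVAE (or JAE) instead of CMOVNO.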
define i32 @uadd_add_imm(i32 %x, i32 %y) {
; X64-LABEL: uadd_add_imm:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    addl %esi, %edi
; X64-NEXT:    leal 100(%rdi), %eax
; X64-NEXT:    cmovael %edi, %eax
; X64-NEXT:    retq
;
; CMOV-LABEL: uadd_add_imm:
; CMOV:       # %bb.0:
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    addl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    leal 100(%ecx), %eax
; CMOV-NEXT:    cmovael %ecx, %eax
; CMOV-NEXT:    retl
;
; NOCMOV-LABEL: uadd_add_imm:
; NOCMOV:       # %bb.0:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    addl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    jae .LBB2_2
; NOCMOV-NEXT:  # %bb.1:
; NOCMOV-NEXT:    addl $100, %eax
; NOCMOV-NEXT:  .LBB2_2:
; NOCMOV-NEXT:    retl
  %o = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %a = add i32 %v2, 100
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

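; Unsigned add + loaded addend: same shape as the signed case, keyed on CF
; (CMOVB/CMOVAE or JAE).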
define i32 @uadd_add_load(i32 %x, i32 %y, ptr %pz) nounwind {
; X64-LABEL: uadd_add_load:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    leal (%rdi,%rsi), %eax
; X64-NEXT:    addl (%rdx), %eax
; X64-NEXT:    addl %esi, %edi
; X64-NEXT:    cmovael %edi, %eax
; X64-NEXT:    retq
;
; CMOV-LABEL: uadd_add_load:
; CMOV:       # %bb.0:
; CMOV-NEXT:    pushl %esi
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
; CMOV-NEXT:    leal (%eax,%edx), %esi
; CMOV-NEXT:    addl (%ecx), %esi
; CMOV-NEXT:    addl %edx, %eax
; CMOV-NEXT:    cmovbl %esi, %eax
; CMOV-NEXT:    popl %esi
; CMOV-NEXT:    retl
;
; NOCMOV-LABEL: uadd_add_load:
; NOCMOV:       # %bb.0:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
; NOCMOV-NEXT:    leal (%eax,%edx), %ecx
; NOCMOV-NEXT:    addl %edx, %eax
; NOCMOV-NEXT:    jae .LBB3_2
; NOCMOV-NEXT:  # %bb.1:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    addl (%eax), %ecx
; NOCMOV-NEXT:    movl %ecx, %eax
; NOCMOV-NEXT:  .LBB3_2:
; NOCMOV-NEXT:    retl
  %o = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %z = load i32, ptr %pz
  %a = add i32 %v2, %z
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

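; Signed sub + immediate: SUB sets OF and the +100 still folds into an LEA, so
; the select stays branchless with CMOV.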
define i32 @ssub_add_imm(i32 %x, i32 %y) {
; X64-LABEL: ssub_add_imm:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    subl %esi, %edi
; X64-NEXT:    leal 100(%rdi), %eax
; X64-NEXT:    cmovnol %edi, %eax
; X64-NEXT:    retq
;
; CMOV-LABEL: ssub_add_imm:
; CMOV:       # %bb.0:
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    subl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    leal 100(%ecx), %eax
; CMOV-NEXT:    cmovnol %ecx, %eax
; CMOV-NEXT:    retl
;
; NOCMOV-LABEL: ssub_add_imm:
; NOCMOV:       # %bb.0:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    subl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    jno .LBB4_2
; NOCMOV-NEXT:  # %bb.1:
; NOCMOV-NEXT:    addl $100, %eax
; NOCMOV-NEXT:  .LBB4_2:
; NOCMOV-NEXT:    retl
  %o = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %a = add i32 %v2, 100
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

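; Signed sub + loaded addend: there is no flag-preserving LEA form of a
; subtraction, so the difference is computed twice and CMOVO (or a branch)
; selects.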
define i32 @ssub_add_load(i32 %x, i32 %y, ptr %pz) nounwind {
; X64-LABEL: ssub_add_load:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    subl %esi, %eax
; X64-NEXT:    addl (%rdx), %eax
; X64-NEXT:    subl %esi, %edi
; X64-NEXT:    cmovnol %edi, %eax
; X64-NEXT:    retq
;
; CMOV-LABEL: ssub_add_load:
; CMOV:       # %bb.0:
; CMOV-NEXT:    pushl %esi
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
; CMOV-NEXT:    movl %eax, %esi
; CMOV-NEXT:    subl %edx, %esi
; CMOV-NEXT:    addl (%ecx), %esi
; CMOV-NEXT:    subl %edx, %eax
; CMOV-NEXT:    cmovol %esi, %eax
; CMOV-NEXT:    popl %esi
; CMOV-NEXT:    retl
;
; NOCMOV-LABEL: ssub_add_load:
; NOCMOV:       # %bb.0:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
; NOCMOV-NEXT:    movl %eax, %ecx
; NOCMOV-NEXT:    subl %edx, %ecx
; NOCMOV-NEXT:    subl %edx, %eax
; NOCMOV-NEXT:    jno .LBB5_2
; NOCMOV-NEXT:  # %bb.1:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    addl (%eax), %ecx
; NOCMOV-NEXT:    movl %ecx, %eax
; NOCMOV-NEXT:  .LBB5_2:
; NOCMOV-NEXT:    retl
  %o = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %z = load i32, ptr %pz
  %a = add i32 %v2, %z
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

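; Unsigned sub + immediate: the borrow (CF) drives CMOVAE/JAE; the +100 is
; still an LEA.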
define i32 @usub_add_imm(i32 %x, i32 %y) {
; X64-LABEL: usub_add_imm:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    subl %esi, %edi
; X64-NEXT:    leal 100(%rdi), %eax
; X64-NEXT:    cmovael %edi, %eax
; X64-NEXT:    retq
;
; CMOV-LABEL: usub_add_imm:
; CMOV:       # %bb.0:
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    subl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    leal 100(%ecx), %eax
; CMOV-NEXT:    cmovael %ecx, %eax
; CMOV-NEXT:    retl
;
; NOCMOV-LABEL: usub_add_imm:
; NOCMOV:       # %bb.0:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    subl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    jae .LBB6_2
; NOCMOV-NEXT:  # %bb.1:
; NOCMOV-NEXT:    addl $100, %eax
; NOCMOV-NEXT:  .LBB6_2:
; NOCMOV-NEXT:    retl
  %o = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %a = add i32 %v2, 100
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

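; Unsigned sub + loaded addend: same double-SUB shape as the signed case,
; keyed on CF (CMOVB/CMOVAE or JAE).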
define i32 @usub_add_load(i32 %x, i32 %y, ptr %pz) nounwind {
; X64-LABEL: usub_add_load:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    subl %esi, %eax
; X64-NEXT:    addl (%rdx), %eax
; X64-NEXT:    subl %esi, %edi
; X64-NEXT:    cmovael %edi, %eax
; X64-NEXT:    retq
;
; CMOV-LABEL: usub_add_load:
; CMOV:       # %bb.0:
; CMOV-NEXT:    pushl %esi
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
; CMOV-NEXT:    movl %eax, %esi
; CMOV-NEXT:    subl %edx, %esi
; CMOV-NEXT:    addl (%ecx), %esi
; CMOV-NEXT:    subl %edx, %eax
; CMOV-NEXT:    cmovbl %esi, %eax
; CMOV-NEXT:    popl %esi
; CMOV-NEXT:    retl
;
; NOCMOV-LABEL: usub_add_load:
; NOCMOV:       # %bb.0:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
; NOCMOV-NEXT:    movl %eax, %ecx
; NOCMOV-NEXT:    subl %edx, %ecx
; NOCMOV-NEXT:    subl %edx, %eax
; NOCMOV-NEXT:    jae .LBB7_2
; NOCMOV-NEXT:  # %bb.1:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    addl (%eax), %ecx
; NOCMOV-NEXT:    movl %ecx, %eax
; NOCMOV-NEXT:  .LBB7_2:
; NOCMOV-NEXT:    retl
  %o = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %z = load i32, ptr %pz
  %a = add i32 %v2, %z
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

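; Signed mul + immediate: IMUL reports overflow in OF and the +100 folds into
; an LEA, so CMOVNO keeps this branchless.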
define i32 @smul_add_imm(i32 %x, i32 %y) {
; X64-LABEL: smul_add_imm:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    imull %esi, %edi
; X64-NEXT:    leal 100(%rdi), %eax
; X64-NEXT:    cmovnol %edi, %eax
; X64-NEXT:    retq
;
; CMOV-LABEL: smul_add_imm:
; CMOV:       # %bb.0:
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    imull {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    leal 100(%ecx), %eax
; CMOV-NEXT:    cmovnol %ecx, %eax
; CMOV-NEXT:    retl
;
; NOCMOV-LABEL: smul_add_imm:
; NOCMOV:       # %bb.0:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    imull {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    jno .LBB8_2
; NOCMOV-NEXT:  # %bb.1:
; NOCMOV-NEXT:    addl $100, %eax
; NOCMOV-NEXT:  .LBB8_2:
; NOCMOV-NEXT:    retl
  %o = tail call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %a = add i32 %v2, 100
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

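; Signed mul + loaded addend: the product is computed twice and CMOVO (or a
; branch) selects between the product and product plus the loaded value.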
define i32 @smul_add_load(i32 %x, i32 %y, ptr %pz) nounwind {
; X64-LABEL: smul_add_load:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    imull %esi, %eax
; X64-NEXT:    addl (%rdx), %eax
; X64-NEXT:    imull %esi, %edi
; X64-NEXT:    cmovnol %edi, %eax
; X64-NEXT:    retq
;
; CMOV-LABEL: smul_add_load:
; CMOV:       # %bb.0:
; CMOV-NEXT:    pushl %esi
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
; CMOV-NEXT:    movl %eax, %esi
; CMOV-NEXT:    imull %edx, %esi
; CMOV-NEXT:    addl (%ecx), %esi
; CMOV-NEXT:    imull %edx, %eax
; CMOV-NEXT:    cmovol %esi, %eax
; CMOV-NEXT:    popl %esi
; CMOV-NEXT:    retl
;
; NOCMOV-LABEL: smul_add_load:
; NOCMOV:       # %bb.0:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
; NOCMOV-NEXT:    movl %eax, %ecx
; NOCMOV-NEXT:    imull %edx, %ecx
; NOCMOV-NEXT:    imull %edx, %eax
; NOCMOV-NEXT:    jno .LBB9_2
; NOCMOV-NEXT:  # %bb.1:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    addl (%eax), %ecx
; NOCMOV-NEXT:    movl %ecx, %eax
; NOCMOV-NEXT:  .LBB9_2:
; NOCMOV-NEXT:    retl
  %o = tail call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %z = load i32, ptr %pz
  %a = add i32 %v2, %z
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

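; Unsigned mul + immediate: MUL leaves the low half of the product in EAX and
; signals overflow in OF/CF, so the +100 LEA is taken from EAX and selected
; with CMOVO.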
define i32 @umul_add_imm(i32 %x, i32 %y) {
; X64-LABEL: umul_add_imm:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    mull %esi
; X64-NEXT:    # kill: def $eax killed $eax def $rax
; X64-NEXT:    leal 100(%rax), %ecx
; X64-NEXT:    cmovol %ecx, %eax
; X64-NEXT:    # kill: def $eax killed $eax killed $rax
; X64-NEXT:    retq
;
; CMOV-LABEL: umul_add_imm:
; CMOV:       # %bb.0:
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CMOV-NEXT:    mull {{[0-9]+}}(%esp)
; CMOV-NEXT:    leal 100(%eax), %ecx
; CMOV-NEXT:    cmovol %ecx, %eax
; CMOV-NEXT:    retl
;
; NOCMOV-LABEL: umul_add_imm:
; NOCMOV:       # %bb.0:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    mull {{[0-9]+}}(%esp)
; NOCMOV-NEXT:    jno .LBB10_2
; NOCMOV-NEXT:  # %bb.1:
; NOCMOV-NEXT:    addl $100, %eax
; NOCMOV-NEXT:  .LBB10_2:
; NOCMOV-NEXT:    retl
  %o = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %a = add i32 %v2, 100
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

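; Unsigned mul + loaded addend: the ADD of the loaded value clobbers the flags,
; so the overflow bit is saved with SETO and re-tested for CMOVNE; the no-CMOV
; path simply branches on JNO before performing the add.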
define i32 @umul_add_load(i32 %x, i32 %y, ptr %pz) nounwind {
; X64-LABEL: umul_add_load:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdx, %rcx
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    mull %esi
; X64-NEXT:    seto %dl
; X64-NEXT:    movl (%rcx), %ecx
; X64-NEXT:    addl %eax, %ecx
; X64-NEXT:    testb %dl, %dl
; X64-NEXT:    cmovnel %ecx, %eax
; X64-NEXT:    retq
;
; CMOV-LABEL: umul_add_load:
; CMOV:       # %bb.0:
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CMOV-NEXT:    mull {{[0-9]+}}(%esp)
; CMOV-NEXT:    seto %dl
; CMOV-NEXT:    movl (%ecx), %ecx
; CMOV-NEXT:    addl %eax, %ecx
; CMOV-NEXT:    testb %dl, %dl
; CMOV-NEXT:    cmovnel %ecx, %eax
; CMOV-NEXT:    retl
;
; NOCMOV-LABEL: umul_add_load:
; NOCMOV:       # %bb.0:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT:    mull {{[0-9]+}}(%esp)
; NOCMOV-NEXT:    jno .LBB11_2
; NOCMOV-NEXT:  # %bb.1:
; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; NOCMOV-NEXT:    addl (%ecx), %eax
; NOCMOV-NEXT:  .LBB11_2:
; NOCMOV-NEXT:    retl
  %o = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
  %v1 = extractvalue { i32, i1 } %o, 1
  %v2 = extractvalue { i32, i1 } %o, 0
  %z = load i32, ptr %pz
  %a = add i32 %v2, %z
  %r = select i1 %v1, i32 %a, i32 %v2
  ret i32 %r
}

declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)
declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
