; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X64

declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)

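; Only the overflow bit of x * 3 is used, so the checks expect a plain mull of
; the constant 3 followed by seto to materialize the overflow flag.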
define zeroext i1 @a(i32 %x)  nounwind {
; X86-LABEL: a:
; X86:       # %bb.0:
; X86-NEXT:    movl $3, %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    seto %al
; X86-NEXT:    retl
;
; X64-LABEL: a:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    movl $3, %ecx
; X64-NEXT:    mull %ecx
; X64-NEXT:    seto %al
; X64-NEXT:    retq
  %res = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 3)
  %obil = extractvalue {i32, i1} %res, 1
  ret i1 %obil
}

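; Only the multiplication result of (a + b) * 2 is used, so the checks expect
; no mul instruction; the product is formed by doubling the sum with add/lea.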
define i32 @test2(i32 %a, i32 %b) nounwind readnone {
; X86-LABEL: test2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl %eax, %eax
; X86-NEXT:    retl
;
; X64-LABEL: test2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    leal (%rdi,%rsi), %eax
; X64-NEXT:    addl %eax, %eax
; X64-NEXT:    retq
entry:
	%tmp0 = add i32 %b, %a
	%tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 2)
	%tmp2 = extractvalue { i32, i1 } %tmp1, 0
	ret i32 %tmp2
}

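; Same pattern as test2 but multiplying (a + b) by 4; only the result is used,
; and the checks expect a mull with the constant 4.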
define i32 @test3(i32 %a, i32 %b) nounwind readnone {
; X86-LABEL: test3:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl $4, %ecx
; X86-NEXT:    mull %ecx
; X86-NEXT:    retl
;
; X64-LABEL: test3:
; X64:       # %bb.0: # %entry
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    leal (%rdi,%rsi), %eax
; X64-NEXT:    movl $4, %ecx
; X64-NEXT:    mull %ecx
; X64-NEXT:    retq
entry:
	%tmp0 = add i32 %b, %a
	%tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 4)
	%tmp2 = extractvalue { i32, i1 } %tmp1, 0
	ret i32 %tmp2
}

; Check that shifts larger than the shift amount type are handled.
; Intentionally not testing codegen here, only that this doesn't assert.
declare {i300, i1} @llvm.umul.with.overflow.i300(i300 %a, i300 %b)
define i300 @test4(i300 %a, i300 %b) nounwind {
; X86-LABEL: test4:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $76, %esp
; X86-NEXT:    movl $4095, %ecx # imm = 0xFFF
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    andl %ecx, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    andl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    mull %edi
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    mull %edi
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    addl %esi, %edi
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    mull %ebp
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    addl %edi, %ebx
; X86-NEXT:    adcl %ecx, %esi
; X86-NEXT:    setb %cl
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ebp
; X86-NEXT:    addl %esi, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movzbl %cl, %eax
; X86-NEXT:    adcl %eax, %edx
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT:    movl %ebp, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    addl %edi, %ecx
; X86-NEXT:    adcl $0, %esi
; X86-NEXT:    movl %ebp, %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    addl %ecx, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %esi, %edi
; X86-NEXT:    setb %cl
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    mull %edx
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    addl %edi, %esi
; X86-NEXT:    movzbl %cl, %eax
; X86-NEXT:    adcl %eax, %ebp
; X86-NEXT:    addl (%esp), %esi # 4-byte Folded Reload
; X86-NEXT:    adcl %ebx, %ebp
; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %edi
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %edi
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    addl %ecx, %ebx
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    addl %ebx, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %edi, %ecx
; X86-NEXT:    setb (%esp) # 1-byte Folded Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    addl %ecx, %ebx
; X86-NEXT:    movzbl (%esp), %eax # 1-byte Folded Reload
; X86-NEXT:    adcl %eax, %edi
; X86-NEXT:    addl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl $0, %ebx
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    addl %esi, %ebp
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    mull %edx
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    addl %ebp, %eax
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    adcl %ecx, %esi
; X86-NEXT:    setb %cl
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    addl %esi, %eax
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    movzbl %cl, %eax
; X86-NEXT:    adcl %eax, %edx
; X86-NEXT:    addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl %edi, %ebp
; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X86-NEXT:    adcl %eax, %esi
; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT:    movl %ebp, %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    addl %esi, %edi
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    mull %ebx
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    addl %edi, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %ecx, %esi
; X86-NEXT:    setb %cl
; X86-NEXT:    movl %ebp, %eax
; X86-NEXT:    mull %ebx
; X86-NEXT:    addl %esi, %eax
; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
; X86-NEXT:    movzbl %cl, %eax
; X86-NEXT:    adcl %eax, %edx
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT:    mull %ebp
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ebp
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    addl %edi, %ecx
; X86-NEXT:    adcl $0, %esi
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    mull %ebx
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    addl %ecx, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %esi, %edi
; X86-NEXT:    setb %cl
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    mull %ebx
; X86-NEXT:    movl %edx, %ebx
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    addl %edi, %ebp
; X86-NEXT:    movzbl %cl, %eax
; X86-NEXT:    adcl %eax, %ebx
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    adcl $0, (%esp) # 4-byte Folded Spill
; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    mull %edi
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    mull %edi
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    addl %ecx, %edi
; X86-NEXT:    adcl $0, %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    addl %edi, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %esi, %ecx
; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    addl %ecx, %edi
; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X86-NEXT:    adcl %eax, %esi
; X86-NEXT:    addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT:    adcl %ebx, %ebp
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    adcl $0, %esi
; X86-NEXT:    addl (%esp), %edi # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    imull %edx, %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    mull %edx
; X86-NEXT:    addl %edx, %ecx
; X86-NEXT:    imull {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    addl %ecx, %ebx
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    addl %edi, %edx
; X86-NEXT:    adcl %esi, %ebx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl $0, %ebx
; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    addl %edi, %ebp
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    mull %edi
; X86-NEXT:    movl %edi, %esi
; X86-NEXT:    movl %edx, %ebx
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    addl %ebp, %edi
; X86-NEXT:    adcl %ecx, %ebx
; X86-NEXT:    setb %cl
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %esi
; X86-NEXT:    addl %ebx, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movzbl %cl, %eax
; X86-NEXT:    adcl %eax, %edx
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    addl %ebp, %ebx
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    mull %esi
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    addl %ebx, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %ecx, %ebp
; X86-NEXT:    setb (%esp) # 1-byte Folded Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %esi
; X86-NEXT:    movl %edx, %ebx
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    addl %ebp, %ecx
; X86-NEXT:    movzbl (%esp), %eax # 1-byte Folded Reload
; X86-NEXT:    adcl %eax, %ebx
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT:    adcl %edi, %ebx
; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %edi
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %edi
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    addl %esi, %ebp
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    addl %ebp, %eax
; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
; X86-NEXT:    adcl %edi, %esi
; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    addl %esi, %ebp
; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X86-NEXT:    adcl %eax, %edi
; X86-NEXT:    addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl %ebx, (%esp) # 4-byte Folded Spill
; X86-NEXT:    adcl $0, %ebp
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    imull %edx, %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    mull %edx
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    addl %edx, %ecx
; X86-NEXT:    imull {{[0-9]+}}(%esp), %esi
; X86-NEXT:    addl %ecx, %esi
; X86-NEXT:    addl %ebp, %ebx
; X86-NEXT:    adcl %edi, %esi
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl (%esp), %eax # 4-byte Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
; X86-NEXT:    adcl $0, %ebx
; X86-NEXT:    adcl $0, %esi
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    imull %ecx, %ebp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    addl %edx, %ebp
; X86-NEXT:    imull {{[0-9]+}}(%esp), %edi
; X86-NEXT:    addl %ebp, %edi
; X86-NEXT:    addl %ebx, %ecx
; X86-NEXT:    adcl %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, %eax
; X86-NEXT:    imull {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    imull %edx, %esi
; X86-NEXT:    mull %edx
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    addl %edx, %edi
; X86-NEXT:    addl %esi, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    imull {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    imull %edx, %esi
; X86-NEXT:    mull %edx
; X86-NEXT:    addl %edx, %ebx
; X86-NEXT:    addl %esi, %ebx
; X86-NEXT:    addl %ebp, %eax
; X86-NEXT:    adcl %edi, %ebx
; X86-NEXT:    addl %ecx, %eax
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, 4(%ecx)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, (%ecx)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, 8(%ecx)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, 12(%ecx)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, 16(%ecx)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, 20(%ecx)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, 24(%ecx)
; X86-NEXT:    movl (%esp), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, 28(%ecx)
; X86-NEXT:    movl %eax, 32(%ecx)
; X86-NEXT:    andl $4095, %ebx # imm = 0xFFF
; X86-NEXT:    movw %bx, 36(%ecx)
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    addl $76, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl $4
;
; X64-LABEL: test4:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbp
; X64-NEXT:    pushq %r15
; X64-NEXT:    pushq %r14
; X64-NEXT:    pushq %r13
; X64-NEXT:    pushq %r12
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT:    movq %r8, %r11
; X64-NEXT:    movq %rcx, %r8
; X64-NEXT:    movq %rdx, %rcx
; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r12
; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r9
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    mulq %r9
; X64-NEXT:    movq %rdx, %rbx
; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT:    movq %rcx, %rax
; X64-NEXT:    mulq %r9
; X64-NEXT:    movq %rdx, %r14
; X64-NEXT:    movq %rax, %r15
; X64-NEXT:    addq %rbx, %r15
; X64-NEXT:    adcq $0, %r14
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    mulq %r12
; X64-NEXT:    movq %rdx, %rbp
; X64-NEXT:    movq %rax, %rbx
; X64-NEXT:    addq %r15, %rbx
; X64-NEXT:    adcq %r14, %rbp
; X64-NEXT:    setb %al
; X64-NEXT:    movzbl %al, %r10d
; X64-NEXT:    movq %rcx, %rax
; X64-NEXT:    mulq %r12
; X64-NEXT:    movq %rdx, %r12
; X64-NEXT:    movq %rax, %r13
; X64-NEXT:    addq %rbp, %r13
; X64-NEXT:    adcq %r10, %r12
; X64-NEXT:    movq %r8, %rax
; X64-NEXT:    mulq %r9
; X64-NEXT:    movq %rdx, %r15
; X64-NEXT:    movq %rax, %r14
; X64-NEXT:    movq %r11, %rax
; X64-NEXT:    mulq %r9
; X64-NEXT:    movq %rdx, %rbp
; X64-NEXT:    movq %rax, %r10
; X64-NEXT:    addq %r15, %r10
; X64-NEXT:    adcq $0, %rbp
; X64-NEXT:    movq %r8, %rax
; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r9
; X64-NEXT:    mulq %r9
; X64-NEXT:    movq %rax, %r15
; X64-NEXT:    addq %r10, %r15
; X64-NEXT:    adcq %rbp, %rdx
; X64-NEXT:    imulq %r9, %r11
; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r9
; X64-NEXT:    addq %r13, %r14
; X64-NEXT:    adcq %r12, %r15
; X64-NEXT:    adcq %rdx, %r11
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    mulq %r9
; X64-NEXT:    movq %rdx, %r10
; X64-NEXT:    movq %rax, %r12
; X64-NEXT:    movq %rcx, %rax
; X64-NEXT:    mulq %r9
; X64-NEXT:    movq %rdx, %r13
; X64-NEXT:    movq %rax, %rbp
; X64-NEXT:    addq %r10, %rbp
; X64-NEXT:    adcq $0, %r13
; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r10
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    mulq %r10
; X64-NEXT:    addq %rbp, %rax
; X64-NEXT:    adcq %r13, %rdx
; X64-NEXT:    imulq %r10, %rcx
; X64-NEXT:    addq %rdx, %rcx
; X64-NEXT:    addq %r14, %r12
; X64-NEXT:    adcq %r15, %rax
; X64-NEXT:    adcq %r11, %rcx
; X64-NEXT:    imulq %r9, %r8
; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT:    imulq {{[0-9]+}}(%rsp), %rdx
; X64-NEXT:    imulq {{[0-9]+}}(%rsp), %rsi
; X64-NEXT:    addq %rdx, %rsi
; X64-NEXT:    addq %r8, %rsi
; X64-NEXT:    addq %rcx, %rsi
; X64-NEXT:    movq %rbx, 8(%rdi)
; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT:    movq %rcx, (%rdi)
; X64-NEXT:    movq %r12, 16(%rdi)
; X64-NEXT:    movq %rax, 24(%rdi)
; X64-NEXT:    movl %esi, 32(%rdi)
; X64-NEXT:    shrq $32, %rsi
; X64-NEXT:    andl $4095, %esi # imm = 0xFFF
; X64-NEXT:    movw %si, 36(%rdi)
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    popq %rbx
; X64-NEXT:    popq %r12
; X64-NEXT:    popq %r13
; X64-NEXT:    popq %r14
; X64-NEXT:    popq %r15
; X64-NEXT:    popq %rbp
; X64-NEXT:    retq
  %x = call {i300, i1} @llvm.umul.with.overflow.i300(i300 %a, i300 %b)
  %y = extractvalue {i300, i1} %x, 0
  ret i300 %y
}