; xref: /llvm-project/llvm/test/CodeGen/X86/smul-with-overflow.ll (revision e30a4fc3e20bf5d9cc2f5bfcb61b4eb0e686a193)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK,X64

; printf format strings used by test1/test2 below.
@ok = internal constant [4 x i8] c"%d\0A\00"
@no = internal constant [4 x i8] c"no\0A\00"

; smul.with.overflow.i32 whose overflow bit feeds a branch (normal block
; listed first); the imull's OF flag should drive jno directly.
define i1 @test1(i32 %v1, i32 %v2) nounwind {
; X86-LABEL: test1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    imull {{[0-9]+}}(%esp), %eax
; X86-NEXT:    jno .LBB0_1
; X86-NEXT:  # %bb.2: # %overflow
; X86-NEXT:    movl $no, (%esp)
; X86-NEXT:    calll printf@PLT
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
; X86-NEXT:  .LBB0_1: # %normal
; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $ok, (%esp)
; X86-NEXT:    calll printf@PLT
; X86-NEXT:    movb $1, %al
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: test1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    imull %esi, %eax
; X64-NEXT:    jno .LBB0_1
; X64-NEXT:  # %bb.2: # %overflow
; X64-NEXT:    movl $no, %edi
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    callq printf@PLT
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
; X64-NEXT:  .LBB0_1: # %normal
; X64-NEXT:    movl $ok, %edi
; X64-NEXT:    movl %eax, %esi
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    callq printf@PLT
; X64-NEXT:    movb $1, %al
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
entry:
  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
  %sum = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %normal

normal:
  %t1 = tail call i32 (ptr, ...) @printf( ptr @ok, i32 %sum ) nounwind
  ret i1 true

overflow:
  %t2 = tail call i32 (ptr, ...) @printf( ptr @no ) nounwind
  ret i1 false
}
64
; Same as test1 but with the overflow block listed before the normal block,
; checking the branch still folds into jno with swapped block layout.
define i1 @test2(i32 %v1, i32 %v2) nounwind {
; X86-LABEL: test2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    imull {{[0-9]+}}(%esp), %eax
; X86-NEXT:    jno .LBB1_2
; X86-NEXT:  # %bb.1: # %overflow
; X86-NEXT:    movl $no, (%esp)
; X86-NEXT:    calll printf@PLT
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
; X86-NEXT:  .LBB1_2: # %normal
; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $ok, (%esp)
; X86-NEXT:    calll printf@PLT
; X86-NEXT:    movb $1, %al
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: test2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    imull %esi, %eax
; X64-NEXT:    jno .LBB1_2
; X64-NEXT:  # %bb.1: # %overflow
; X64-NEXT:    movl $no, %edi
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    callq printf@PLT
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
; X64-NEXT:  .LBB1_2: # %normal
; X64-NEXT:    movl $ok, %edi
; X64-NEXT:    movl %eax, %esi
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    callq printf@PLT
; X64-NEXT:    movb $1, %al
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
entry:
  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
  %sum = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %normal

overflow:
  %t2 = tail call i32 (ptr, ...) @printf( ptr @no ) nounwind
  ret i1 false

normal:
  %t1 = tail call i32 (ptr, ...) @printf( ptr @ok, i32 %sum ) nounwind
  ret i1 true
}
121
declare i32 @printf(ptr, ...) nounwind
declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)

; Multiply by constant 2 with only the value (element 0) used: should lower
; to an add of the value with itself, not a real multiply.
define i32 @test3(i32 %a, i32 %b) nounwind readnone {
; X86-LABEL: test3:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl %eax, %eax
; X86-NEXT:    retl
;
; X64-LABEL: test3:
; X64:       # %bb.0: # %entry
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    leal (%rdi,%rsi), %eax
; X64-NEXT:    addl %eax, %eax
; X64-NEXT:    retq
entry:
	%tmp0 = add i32 %b, %a
	%tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 2)
	%tmp2 = extractvalue { i32, i1 } %tmp1, 0
	ret i32 %tmp2
}
146
; Multiply by constant 4 with only the value (element 0) used: lowers to a
; plain imull by 4.
define i32 @test4(i32 %a, i32 %b) nounwind readnone {
; X86-LABEL: test4:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    imull $4, %eax, %eax
; X86-NEXT:    retl
;
; X64-LABEL: test4:
; X64:       # %bb.0: # %entry
; X64-NEXT:    addl %esi, %edi
; X64-NEXT:    imull $4, %edi, %eax
; X64-NEXT:    retq
entry:
	%tmp0 = add i32 %b, %a
	%tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 4)
	%tmp2 = extractvalue { i32, i1 } %tmp1, 0
	ret i32 %tmp2
}
166
declare { i63, i1 } @llvm.smul.with.overflow.i63(i63, i63) nounwind readnone

; Was returning false, should return true (not constant folded yet though).
; PR13991
define i1 @test5() nounwind {
; CHECK-LABEL: test5:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movb $1, %al
; CHECK-NEXT:    ret{{[l|q]}}
entry:
  %res = call { i63, i1 } @llvm.smul.with.overflow.i63(i63 4, i63 4611686018427387903)
  %sum = extractvalue { i63, i1 } %res, 0
  %overflow = extractvalue { i63, i1 } %res, 1
  ret i1 %overflow
}


declare { i129, i1 } @llvm.smul.with.overflow.i129(i129, i129)

; Non-power-of-two wide (i129) smul.with.overflow, legalized into multi-word
; mul/adc sequences; the whole {i129, i1} result is returned.
define { i129, i1 } @smul_ovf(i129 %x, i129 %y) nounwind {
; X86-LABEL: smul_ovf:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $108, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    andl $1, %eax
; X86-NEXT:    negl %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    andl $1, %eax
; X86-NEXT:    negl %eax
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    addl %edx, %esi
; X86-NEXT:    movl %esi, %edi
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    adcl $0, %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %edi, %ebp
; X86-NEXT:    addl %eax, %ebp
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %edx, %esi
; X86-NEXT:    setb %al
; X86-NEXT:    addl %edi, %esi
; X86-NEXT:    movzbl %al, %edi
; X86-NEXT:    adcl %edx, %edi
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    addl %esi, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %ebp, %eax
; X86-NEXT:    adcl %edi, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl $0, %esi
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %edx, %ebx
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    addl %edx, %ebp
; X86-NEXT:    adcl $0, %ebx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    addl %eax, %ebp
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %edx, %ebx
; X86-NEXT:    setb %al
; X86-NEXT:    addl %ecx, %ebx
; X86-NEXT:    movzbl %al, %ecx
; X86-NEXT:    adcl %edx, %ecx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    movl %ebx, %edx
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    adcl $0, %eax
; X86-NEXT:    addl %esi, %edx
; X86-NEXT:    adcl %edi, %eax
; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT:    adcl %ebp, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X86-NEXT:    adcl %ebx, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl (%esp), %esi # 4-byte Reload
; X86-NEXT:    movl %esi, %edi
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT:    addl %ebp, %edi
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT:    adcl $0, %ebx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    addl %ecx, %edi
; X86-NEXT:    adcl %ebp, %ebx
; X86-NEXT:    movl %ebx, %ebp
; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT:    addl %ebx, %eax
; X86-NEXT:    addl %ecx, %ebx
; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %ecx, %ebx
; X86-NEXT:    adcl %edi, %eax
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    addl %esi, %ebp
; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT:    adcl %ecx, %eax
; X86-NEXT:    addl %ebx, %edx
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    addl %edi, %esi
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    mull %edi
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    addl %esi, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %ecx, %ebp
; X86-NEXT:    setb %cl
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %edi
; X86-NEXT:    addl %ebp, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movzbl %cl, %eax
; X86-NEXT:    adcl %eax, %edx
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    addl %edi, %esi
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT:    mull %ebp
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    addl %esi, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %ecx, %edi
; X86-NEXT:    setb %bl
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ebp
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    addl %edi, %ecx
; X86-NEXT:    movzbl %bl, %eax
; X86-NEXT:    adcl %eax, %ebp
; X86-NEXT:    addl (%esp), %ecx # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    mull %edi
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %edi
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    addl %esi, %ebx
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    mull %edx
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    addl %ebx, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %edi, %esi
; X86-NEXT:    setb (%esp) # 1-byte Folded Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    addl %esi, %ebx
; X86-NEXT:    movzbl (%esp), %eax # 1-byte Folded Reload
; X86-NEXT:    adcl %eax, %edi
; X86-NEXT:    addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl $0, %ebx
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    addl %ebp, %esi
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    addl %esi, %eax
; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
; X86-NEXT:    adcl %ecx, %ebp
; X86-NEXT:    setb %cl
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull {{[0-9]+}}(%esp)
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    addl %ebp, %esi
; X86-NEXT:    movzbl %cl, %eax
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    adcl %eax, %ebp
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    addl %ebx, %ecx
; X86-NEXT:    movl (%esp), %edx # 4-byte Reload
; X86-NEXT:    adcl %edi, %edx
; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X86-NEXT:    adcl %eax, %esi
; X86-NEXT:    adcl $0, %ebp
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT:    movl %edx, (%esp) # 4-byte Spill
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    adcl $0, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    sarl $31, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    addl %esi, %eax
; X86-NEXT:    movl %edx, %ebx
; X86-NEXT:    adcl $0, %ebx
; X86-NEXT:    addl %edi, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %esi, %ebx
; X86-NEXT:    setb %al
; X86-NEXT:    addl %ebp, %ebx
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    adcl %edx, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    mull %ecx
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    movl %eax, %ebp
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    addl %eax, %edi
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    addl %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %eax, %ecx
; X86-NEXT:    setb %al
; X86-NEXT:    addl %ebp, %ecx
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    adcl %edx, %eax
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT:    addl %ecx, %edi
; X86-NEXT:    movl %ecx, %edx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    adcl %eax, %ecx
; X86-NEXT:    movl %ebx, %esi
; X86-NEXT:    adcl $0, %esi
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT:    adcl $0, %ebp
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    adcl $0, %eax
; X86-NEXT:    addl %esi, %edx
; X86-NEXT:    adcl %ebp, %eax
; X86-NEXT:    setb %cl
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT:    addl %edi, %edx
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    movzbl %cl, %ecx
; X86-NEXT:    adcl %ebx, %ecx
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT:    movl %esi, %ebx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    addl %ecx, %ebx
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    movl %ebx, %ebp
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT:    movl %ecx, %ebx
; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT:    movl %edi, %ecx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT:    addl %ecx, %edi
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT:    addl %esi, %ecx
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %ebp, %edi
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    adcl %edi, %ecx
; X86-NEXT:    addl %esi, %edx
; X86-NEXT:    adcl %eax, %ebp
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT:    addl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    movl (%esp), %eax # 4-byte Reload
; X86-NEXT:    adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    adcl $0, %ebp
; X86-NEXT:    adcl $0, %ebx
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    movl %ecx, %esi
; X86-NEXT:    sarl $31, %esi
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    adcl %esi, %ecx
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    adcl %esi, %ecx
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    adcl %esi, %ecx
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %eax, %esi
; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    mull {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    addl %edx, %esi
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    addl %eax, %esi
; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %edx, %ecx
; X86-NEXT:    setb %bl
; X86-NEXT:    addl %eax, %ecx
; X86-NEXT:    movzbl %bl, %ebx
; X86-NEXT:    adcl %edx, %ebx
; X86-NEXT:    addl %ecx, %eax
; X86-NEXT:    movl %esi, %edi
; X86-NEXT:    adcl %ebx, %edi
; X86-NEXT:    movl %ecx, %ebp
; X86-NEXT:    adcl $0, %ebp
; X86-NEXT:    movl %ebx, %edx
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    adcl %esi, %edi
; X86-NEXT:    movl %edi, (%esp) # 4-byte Spill
; X86-NEXT:    movl %ecx, %esi
; X86-NEXT:    adcl $0, %esi
; X86-NEXT:    movl %ebx, %edi
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    addl %ebp, %esi
; X86-NEXT:    adcl %edx, %edi
; X86-NEXT:    setb %al
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT:    adcl %ebp, %edi
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    adcl %ecx, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    adcl $0, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    imull {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT:    addl %eax, %eax
; X86-NEXT:    adcl %edx, %edx
; X86-NEXT:    addl %ecx, %eax
; X86-NEXT:    adcl %ebx, %edx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT:    addl %ebx, %ebx
; X86-NEXT:    movl %ebp, %ecx
; X86-NEXT:    adcl %ebp, %ecx
; X86-NEXT:    adcl %eax, %eax
; X86-NEXT:    adcl %edx, %edx
; X86-NEXT:    addl %esi, %ebx
; X86-NEXT:    adcl %edi, %ecx
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl (%esp), %edi # 4-byte Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT:    movl %edi, (%esp) # 4-byte Spill
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT:    movl %edi, %ebp
; X86-NEXT:    sarl $31, %ebp
; X86-NEXT:    xorl %ebp, %ebx
; X86-NEXT:    xorl %ebp, %esi
; X86-NEXT:    orl %ebx, %esi
; X86-NEXT:    xorl %ebp, %eax
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT:    xorl %ebp, %ebx
; X86-NEXT:    orl %eax, %ebx
; X86-NEXT:    orl %esi, %ebx
; X86-NEXT:    xorl %ebp, %ecx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    xorl %ebp, %eax
; X86-NEXT:    orl %ecx, %eax
; X86-NEXT:    xorl %ebp, %edx
; X86-NEXT:    xorl (%esp), %ebp # 4-byte Folded Reload
; X86-NEXT:    orl %edx, %ebp
; X86-NEXT:    orl %eax, %ebp
; X86-NEXT:    orl %ebx, %ebp
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    andl $1, %ecx
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    negl %eax
; X86-NEXT:    xorl %eax, %edi
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT:    xorl %eax, %esi
; X86-NEXT:    orl %edi, %esi
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT:    xorl %eax, %edi
; X86-NEXT:    xorl %edx, %eax
; X86-NEXT:    orl %edi, %eax
; X86-NEXT:    orl %esi, %eax
; X86-NEXT:    orl %ebp, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, 4(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, (%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, 8(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    movl %edx, 12(%eax)
; X86-NEXT:    movb %cl, 16(%eax)
; X86-NEXT:    setne 32(%eax)
; X86-NEXT:    addl $108, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl $4
;
; X64-LABEL: smul_ovf:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbp
; X64-NEXT:    pushq %r15
; X64-NEXT:    pushq %r14
; X64-NEXT:    pushq %r13
; X64-NEXT:    pushq %r12
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rcx, %r14
; X64-NEXT:    movq %rdx, %r15
; X64-NEXT:    movq %rsi, %rbx
; X64-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r13
; X64-NEXT:    andl $1, %r13d
; X64-NEXT:    negq %r13
; X64-NEXT:    andl $1, %r14d
; X64-NEXT:    negq %r14
; X64-NEXT:    movq %r14, %rax
; X64-NEXT:    mulq %r8
; X64-NEXT:    movq %rdx, %r11
; X64-NEXT:    movq %rax, %rdi
; X64-NEXT:    movq %rax, %r12
; X64-NEXT:    addq %rdx, %r12
; X64-NEXT:    adcq $0, %r11
; X64-NEXT:    movq %r14, %rax
; X64-NEXT:    mulq %r9
; X64-NEXT:    addq %rax, %r12
; X64-NEXT:    adcq %rdx, %r11
; X64-NEXT:    setb %cl
; X64-NEXT:    movzbl %cl, %ecx
; X64-NEXT:    addq %rax, %r11
; X64-NEXT:    adcq %rdx, %rcx
; X64-NEXT:    addq %rdi, %r11
; X64-NEXT:    adcq %r12, %rcx
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    mulq %r8
; X64-NEXT:    movq %rdx, %r10
; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT:    movq %r15, %rax
; X64-NEXT:    mulq %r8
; X64-NEXT:    movq %rdx, %r8
; X64-NEXT:    movq %rax, %rbp
; X64-NEXT:    addq %r10, %rbp
; X64-NEXT:    adcq $0, %r8
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    mulq %r9
; X64-NEXT:    movq %rdx, %rsi
; X64-NEXT:    addq %rbp, %rax
; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT:    adcq %r8, %rsi
; X64-NEXT:    setb %al
; X64-NEXT:    movzbl %al, %ebp
; X64-NEXT:    movq %r15, %rax
; X64-NEXT:    mulq %r9
; X64-NEXT:    movq %rdx, %r8
; X64-NEXT:    movq %rax, %r10
; X64-NEXT:    addq %rsi, %r10
; X64-NEXT:    adcq %rbp, %r8
; X64-NEXT:    addq %rdi, %r10
; X64-NEXT:    adcq %r12, %r8
; X64-NEXT:    adcq $0, %r11
; X64-NEXT:    adcq $0, %rcx
; X64-NEXT:    movq %r13, %rax
; X64-NEXT:    mulq %rbx
; X64-NEXT:    movq %rdx, %rsi
; X64-NEXT:    movq %rax, %r9
; X64-NEXT:    movq %r15, %rax
; X64-NEXT:    mulq %r13
; X64-NEXT:    movq %rax, %rbx
; X64-NEXT:    addq %rsi, %rbx
; X64-NEXT:    movq %rdx, %r15
; X64-NEXT:    adcq $0, %r15
; X64-NEXT:    addq %r9, %rbx
; X64-NEXT:    adcq %rsi, %r15
; X64-NEXT:    setb %sil
; X64-NEXT:    movzbl %sil, %esi
; X64-NEXT:    addq %rax, %r15
; X64-NEXT:    adcq %rdx, %rsi
; X64-NEXT:    addq %r9, %r15
; X64-NEXT:    adcq %rbx, %rsi
; X64-NEXT:    addq %r9, %r10
; X64-NEXT:    adcq %r8, %rbx
; X64-NEXT:    adcq $0, %r15
; X64-NEXT:    adcq $0, %rsi
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    sarq $63, %rax
; X64-NEXT:    movq %rcx, %rdi
; X64-NEXT:    sarq $63, %rdi
; X64-NEXT:    addq %r11, %r15
; X64-NEXT:    adcq %rcx, %rsi
; X64-NEXT:    movq %rdi, %r9
; X64-NEXT:    adcq %rax, %r9
; X64-NEXT:    adcq %rax, %rdi
; X64-NEXT:    movq %r14, %rax
; X64-NEXT:    mulq %r13
; X64-NEXT:    movq %rax, %r8
; X64-NEXT:    movq %rax, %rcx
; X64-NEXT:    addq %rdx, %rcx
; X64-NEXT:    movq %rdx, %r11
; X64-NEXT:    adcq $0, %r11
; X64-NEXT:    addq %rax, %rcx
; X64-NEXT:    adcq %rdx, %r11
; X64-NEXT:    setb %al
; X64-NEXT:    addq %r8, %r11
; X64-NEXT:    movzbl %al, %r12d
; X64-NEXT:    adcq %rdx, %r12
; X64-NEXT:    movq %r13, %rax
; X64-NEXT:    imulq %r14
; X64-NEXT:    addq %rax, %rax
; X64-NEXT:    adcq %rdx, %rdx
; X64-NEXT:    addq %r11, %rax
; X64-NEXT:    adcq %r12, %rdx
; X64-NEXT:    addq %r8, %r15
; X64-NEXT:    adcq %rsi, %rcx
; X64-NEXT:    adcq %r9, %rax
; X64-NEXT:    adcq %rdi, %rdx
; X64-NEXT:    movq %rbx, %rsi
; X64-NEXT:    sarq $63, %rsi
; X64-NEXT:    xorq %rsi, %rax
; X64-NEXT:    xorq %rsi, %r15
; X64-NEXT:    orq %rax, %r15
; X64-NEXT:    xorq %rsi, %rdx
; X64-NEXT:    xorq %rcx, %rsi
; X64-NEXT:    orq %rdx, %rsi
; X64-NEXT:    orq %r15, %rsi
; X64-NEXT:    movl %r10d, %edx
; X64-NEXT:    andl $1, %edx
; X64-NEXT:    movq %rdx, %rcx
; X64-NEXT:    negq %rcx
; X64-NEXT:    xorq %rcx, %rbx
; X64-NEXT:    xorq %r10, %rcx
; X64-NEXT:    orq %rbx, %rcx
; X64-NEXT:    orq %rsi, %rcx
; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT:    movq %rcx, 8(%rax)
; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT:    movq %rcx, (%rax)
; X64-NEXT:    movb %dl, 16(%rax)
; X64-NEXT:    setne 32(%rax)
; X64-NEXT:    popq %rbx
; X64-NEXT:    popq %r12
; X64-NEXT:    popq %r13
; X64-NEXT:    popq %r14
; X64-NEXT:    popq %r15
; X64-NEXT:    popq %rbp
; X64-NEXT:    retq
  %r = tail call { i129, i1 } @llvm.smul.with.overflow.i129(i129 %x, i129 %y)
  ret { i129, i1 } %r
}
