; xref: /llvm-project/llvm/test/CodeGen/SPARC/overflow-intrinsic-optimizations.ll (revision ff9af4c43ad71eeba2cabe99609cfaa0fd54c1d0)
; RUN: llc %s -mtriple=sparc -o - | FileCheck %s
; RUN: llc %s -mtriple=sparc64 -o - | FileCheck %s
; Overflow-multiply intrinsics exercised by the tests below. Each returns
; the product plus an i1 overflow flag as a two-element struct.
declare { i128, i1 } @llvm.smul.with.overflow.i128(i128, i128)
declare { i64, i1 } @llvm.smul.with.overflow.i64(i64, i64)
; i128 signed multiply-with-overflow: verify the generated SPARC code does
; not fall back to a __muloti4 libcall (CHECK-NOT below). Returns 1 when the
; multiply did NOT overflow, 0 otherwise; the full product is stored to %r.
define i32 @mul(i128 %a, i128 %b, ptr %r) {
; CHECK-LABEL: mul
; CHECK-NOT: call __muloti4
  %mul4 = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %a, i128 %b)
  %mul.val = extractvalue { i128, i1 } %mul4, 0
  %mul.ov = extractvalue { i128, i1 } %mul4, 1
  ; Invert the overflow bit so the function returns "no overflow" as 1.
  %mul.not.ov = xor i1 %mul.ov, true
  store i128 %mul.val, ptr %r, align 16
  %conv = zext i1 %mul.not.ov to i32
  ret i32 %conv
}
; i64 signed multiply-with-overflow: verify no __mulodi4 libcall is emitted
; (CHECK-NOT below). Same shape as @mul: returns 1 on no-overflow, 0 on
; overflow, and stores the product to %r. The store keeps the original
; over-aligned "align 16" (legal for i64; asserts %r is 16-byte aligned).
define i32 @mul2(i64 %a, i64 %b, ptr %r) {
; CHECK-LABEL: mul2
; CHECK-NOT: call __mulodi4
  %mul4 = tail call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
  %mul.val = extractvalue { i64, i1 } %mul4, 0
  %mul.ov = extractvalue { i64, i1 } %mul4, 1
  ; Invert the overflow bit so the function returns "no overflow" as 1.
  %mul.not.ov = xor i1 %mul.ov, true
  store i64 %mul.val, ptr %r, align 16
  %conv = zext i1 %mul.not.ov to i32
  ret i32 %conv
}