; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt %s -S -passes=msan 2>&1 | FileCheck %s
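;
; The autogenerated CHECK lines below expect MSan to instrument each
; llvm.*.with.overflow intrinsic: the shadows of both operands are checked
; (branching to __msan_warning_noreturn, !prof-weighted as cold, if either is
; non-zero), and a clean (zeroinitializer) shadow is stored to
; @__msan_retval_tls for the { result, overflow } return aggregate. For the
; vector variant, the <4 x i32> operand shadows are bitcast to i128 before the
; comparison.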

target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

define {i64, i1} @test_sadd_with_overflow(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define { i64, i1 } @test_sadd_with_overflow(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0:![0-9]+]]
; CHECK:       3:
; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
; CHECK-NEXT:    unreachable
; CHECK:       4:
; CHECK-NEXT:    [[RES:%.*]] = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT:    store { i64, i1 } zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT:    ret { i64, i1 } [[RES]]
;
  %res = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
  ret { i64, i1 } %res
}

define {i64, i1} @test_uadd_with_overflow(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define { i64, i1 } @test_uadd_with_overflow(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK:       3:
; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT:    unreachable
; CHECK:       4:
; CHECK-NEXT:    [[RES:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT:    store { i64, i1 } zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT:    ret { i64, i1 } [[RES]]
;
  %res = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  ret { i64, i1 } %res
}

define {i64, i1} @test_smul_with_overflow(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define { i64, i1 } @test_smul_with_overflow(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK:       3:
; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT:    unreachable
; CHECK:       4:
; CHECK-NEXT:    [[RES:%.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT:    store { i64, i1 } zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT:    ret { i64, i1 } [[RES]]
;
  %res = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
  ret { i64, i1 } %res
}
define {i64, i1} @test_umul_with_overflow(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define { i64, i1 } @test_umul_with_overflow(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK:       3:
; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT:    unreachable
; CHECK:       4:
; CHECK-NEXT:    [[RES:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT:    store { i64, i1 } zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT:    ret { i64, i1 } [[RES]]
;
  %res = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
  ret { i64, i1 } %res
}
define {i64, i1} @test_ssub_with_overflow(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define { i64, i1 } @test_ssub_with_overflow(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK:       3:
; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT:    unreachable
; CHECK:       4:
; CHECK-NEXT:    [[RES:%.*]] = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT:    store { i64, i1 } zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT:    ret { i64, i1 } [[RES]]
;
  %res = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
  ret { i64, i1 } %res
}
define {i64, i1} @test_usub_with_overflow(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define { i64, i1 } @test_usub_with_overflow(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK:       3:
; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT:    unreachable
; CHECK:       4:
; CHECK-NEXT:    [[RES:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT:    store { i64, i1 } zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT:    ret { i64, i1 } [[RES]]
;
  %res = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  ret { i64, i1 } %res
}

define {<4 x i32>, <4 x i1>} @test_sadd_with_overflow_vec(<4 x i32> %a, <4 x i32> %b) #0 {
; CHECK-LABEL: define { <4 x i32>, <4 x i1> } @test_sadd_with_overflow_vec(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
; CHECK:       5:
; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT:    unreachable
; CHECK:       6:
; CHECK-NEXT:    [[RES:%.*]] = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> [[A]], <4 x i32> [[B]])
; CHECK-NEXT:    store { <4 x i32>, <4 x i1> } zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT:    ret { <4 x i32>, <4 x i1> } [[RES]]
;
  %res = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> %a, <4 x i32> %b)
  ret { <4 x i32>, <4 x i1> } %res
}

attributes #0 = { sanitize_memory }
;.
; CHECK: [[PROF0]] = !{!"branch_weights", i32 1, i32 1000}
;.