; Source: llvm/test/Transforms/InstCombine/memchr-11.ll (revision 4ab40eca080965c65802710e39adbb78c4ce7bde)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
;
; Verify that the result of memchr calls used in equality expressions
; with either the first argument or null are optimally folded.

declare ptr @memchr(ptr, i32, i64)


@a5 = constant [5 x i8] c"12345"

; Fold memchr(a5, c, 5) == a5 to *a5 == c.

define i1 @fold_memchr_a_c_5_eq_a(i32 %c) {
; CHECK-LABEL: @fold_memchr_a_c_5_eq_a(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT:    [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], 49
; CHECK-NEXT:    ret i1 [[CHAR0CMP]]
;
  %q = call ptr @memchr(ptr @a5, i32 %c, i64 5)
  %cmp = icmp eq ptr %q, @a5
  ret i1 %cmp
}


; Fold memchr(a5, c, n) == a5 to n && *a5 == c.  Unlike the case when
; the first argument is an arbitrary, including potentially past-the-end,
; pointer, this is safe because a5 is dereferenceable.

define i1 @fold_memchr_a_c_n_eq_a(i32 %c, i64 %n) {
; CHECK-LABEL: @fold_memchr_a_c_n_eq_a(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT:    [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], 49
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[N:%.*]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i1 [[CHAR0CMP]], i1 false
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %q = call ptr @memchr(ptr @a5, i32 %c, i64 %n)
  %cmp = icmp eq ptr %q, @a5
  ret i1 %cmp
}


; Do not fold memchr(a5 + i, c, n).

define i1 @call_memchr_api_c_n_eq_a(i64 %i, i32 %c, i64 %n) {
; CHECK-LABEL: @call_memchr_api_c_n_eq_a(
; CHECK-NEXT:    [[P:%.*]] = getelementptr [5 x i8], ptr @a5, i64 0, i64 [[I:%.*]]
; CHECK-NEXT:    [[Q:%.*]] = call ptr @memchr(ptr [[P]], i32 [[C:%.*]], i64 [[N:%.*]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[Q]], [[P]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %p = getelementptr [5 x i8], ptr @a5, i64 0, i64 %i
  %q = call ptr @memchr(ptr %p, i32 %c, i64 %n)
  %cmp = icmp eq ptr %q, %p
  ret i1 %cmp
}


; Fold memchr(s, c, 15) == s to *s == c.

define i1 @fold_memchr_s_c_15_eq_s(ptr %s, i32 %c) {
; CHECK-LABEL: @fold_memchr_s_c_15_eq_s(
; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT:    [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[CHAR0CMP]]
;
  %p = call ptr @memchr(ptr %s, i32 %c, i64 15)
  %cmp = icmp eq ptr %p, %s
  ret i1 %cmp
}


; Fold memchr(s, c, 17) != s to *s != c.

define i1 @fold_memchr_s_c_17_neq_s(ptr %s, i32 %c) {
; CHECK-LABEL: @fold_memchr_s_c_17_neq_s(
; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT:    [[CHAR0CMP:%.*]] = icmp ne i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[CHAR0CMP]]
;
  %p = call ptr @memchr(ptr %s, i32 %c, i64 17)
  %cmp = icmp ne ptr %p, %s
  ret i1 %cmp
}


; Fold memchr(s, c, n) == s to *s == c for nonzero n.

define i1 @fold_memchr_s_c_nz_eq_s(ptr %s, i32 %c, i64 %n) {
; CHECK-LABEL: @fold_memchr_s_c_nz_eq_s(
; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT:    [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[CHAR0CMP]]
;
  %nz = or i64 %n, 1
  %p = call ptr @memchr(ptr %s, i32 %c, i64 %nz)
  %cmp = icmp eq ptr %p, %s
  ret i1 %cmp
}


; But do not fold memchr(s, c, n) as above if n might be zero.  This could
; be optimized to the equivalent of N && *S == C provided a short-circuiting
; AND, otherwise the load could read a byte just past the end of an array.

define i1 @call_memchr_s_c_n_eq_s(ptr %s, i32 %c, i64 %n) {
; CHECK-LABEL: @call_memchr_s_c_n_eq_s(
; CHECK-NEXT:    [[P:%.*]] = call ptr @memchr(ptr [[S:%.*]], i32 [[C:%.*]], i64 [[N:%.*]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[P]], [[S]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %p = call ptr @memchr(ptr %s, i32 %c, i64 %n)
  %cmp = icmp eq ptr %p, %s
  ret i1 %cmp
}