; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
;
; Verify that the results of memrchr calls used in equality expressions
; with the first argument aren't folded like the corresponding calls
; to memchr might be.
; Folding of equality expressions with the first argument plus the bound
; - 1, i.e., memrchr(S, C, N) == S + N - 1 to N && S[N - 1] == C, is not
; implemented.
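;
; For reference, a sketch of the semantics involved (not output these
; tests check for): memchr returns the first occurrence of C and memrchr
; the last, so in C terms the folds in question are
;   memchr(S, C, N) == S          <=>  N != 0 && S[0] == C
;   memrchr(S, C, N) == S + N - 1 <=>  N != 0 && S[N - 1] == C
; The first equivalence is the one exploited for memchr; the second is
; the memrchr counterpart that is not implemented.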

declare ptr @memrchr(ptr, i32, i64)


@a5 = constant [5 x i8] c"12345";

; Do not fold memrchr(a5, c, 9) == a5.  The corresponding call to memchr
; is folded, so this test verifies that the memrchr folder doesn't make
; the wrong assumption.  The bound of 9 tries to avoid having to adjust
; the test if the call is folded into a series of ORs as in D128011.
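;
; (Illustration only, not checked output: because a5 is constant, an
; in-bounds search is computable, e.g. memchr(a5, c, 5) == a5 reduces to
; c == '1', and a D128011-style fold would expand such a call into a
; series of ORed character comparisons; the bound of 9 sidesteps both.)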

define i1 @call_memrchr_a_c_9_eq_a(i32 %c) {
; CHECK-LABEL: @call_memrchr_a_c_9_eq_a(
; CHECK-NEXT:    [[Q:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(9) @a5, i32 [[C:%.*]], i64 9)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[Q]], @a5
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %q = call ptr @memrchr(ptr @a5, i32 %c, i64 9)
  %cmp = icmp eq ptr %q, @a5
  ret i1 %cmp
}


; Do not fold memrchr(a5, c, n).

define i1 @call_memrchr_a_c_n_eq_a(i32 %c, i64 %n) {
; CHECK-LABEL: @call_memrchr_a_c_n_eq_a(
; CHECK-NEXT:    [[Q:%.*]] = call ptr @memrchr(ptr nonnull @a5, i32 [[C:%.*]], i64 [[N:%.*]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[Q]], @a5
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %q = call ptr @memrchr(ptr @a5, i32 %c, i64 %n)
  %cmp = icmp eq ptr %q, @a5
  ret i1 %cmp
}


; Do not fold memrchr(s, c, 17).

define i1 @call_memrchr_s_c_17_eq_s(ptr %s, i32 %c) {
; CHECK-LABEL: @call_memrchr_s_c_17_eq_s(
; CHECK-NEXT:    [[P:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(17) [[S:%.*]], i32 [[C:%.*]], i64 17)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[P]], [[S]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %p = call ptr @memrchr(ptr %s, i32 %c, i64 17)
  %cmp = icmp eq ptr %p, %s
  ret i1 %cmp
}


; Do not fold memrchr(s, c, 7).

define i1 @call_memrchr_s_c_7_neq_s(ptr %s, i32 %c) {
; CHECK-LABEL: @call_memrchr_s_c_7_neq_s(
; CHECK-NEXT:    [[P:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(7) [[S:%.*]], i32 [[C:%.*]], i64 7)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[P]], [[S]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %p = call ptr @memrchr(ptr %s, i32 %c, i64 7)
  %cmp = icmp ne ptr %p, %s
  ret i1 %cmp
}


; Do not fold memrchr(s, c, n).

define i1 @call_memrchr_s_c_n_eq_s(ptr %s, i32 %c, i64 %n) {
; CHECK-LABEL: @call_memrchr_s_c_n_eq_s(
; CHECK-NEXT:    [[P:%.*]] = call ptr @memrchr(ptr [[S:%.*]], i32 [[C:%.*]], i64 [[N:%.*]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[P]], [[S]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %p = call ptr @memrchr(ptr %s, i32 %c, i64 %n)
  %cmp = icmp eq ptr %p, %s
  ret i1 %cmp
}