; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S -data-layout=e-n32 | FileCheck %s --check-prefix=ALL --check-prefix=LE
; RUN: opt < %s -passes=instcombine -S -data-layout=E-n32 | FileCheck %s --check-prefix=ALL --check-prefix=BE

declare i32 @memcmp(ptr, ptr, i64)

; The alignment of this constant does not matter. We constant fold the load.
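; The buffer bytes are 00 00 00 01, so the folded i32 value is 16777216
; (0x01000000) under the little-endian layout and 1 under the big-endian one.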

@charbuf = private unnamed_addr constant [4 x i8] [i8 0, i8 0, i8 0, i8 1], align 1

define i1 @memcmp_4bytes_unaligned_constant_i8(ptr align 4 %x) {
; LE-LABEL: @memcmp_4bytes_unaligned_constant_i8(
; LE-NEXT:    [[LHSV:%.*]] = load i32, ptr [[X:%.*]], align 4
; LE-NEXT:    [[DOTNOT:%.*]] = icmp eq i32 [[LHSV]], 16777216
; LE-NEXT:    ret i1 [[DOTNOT]]
;
; BE-LABEL: @memcmp_4bytes_unaligned_constant_i8(
; BE-NEXT:    [[LHSV:%.*]] = load i32, ptr [[X:%.*]], align 4
; BE-NEXT:    [[DOTNOT:%.*]] = icmp eq i32 [[LHSV]], 1
; BE-NEXT:    ret i1 [[DOTNOT]]
;
  %call = tail call i32 @memcmp(ptr %x, ptr @charbuf, i64 4)
  %cmpeq0 = icmp eq i32 %call, 0
  ret i1 %cmpeq0
}

; We still don't care about alignment of the constant. We are not limited to constant folding only i8 arrays.
; It doesn't matter if the constant operand is the first operand to the memcmp.
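; The i16 values 1 and 2 occupy the first four bytes; their in-memory byte
; order follows the target endianness, so the folded i32 value is
; 131073 (0x00020001) for little-endian and 65538 (0x00010002) for big-endian.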

@intbuf_unaligned = private unnamed_addr constant [4 x i16] [i16 1, i16 2, i16 3, i16 4], align 1

define i1 @memcmp_4bytes_unaligned_constant_i16(ptr align 4 %x) {
; LE-LABEL: @memcmp_4bytes_unaligned_constant_i16(
; LE-NEXT:    [[RHSV:%.*]] = load i32, ptr [[X:%.*]], align 4
; LE-NEXT:    [[DOTNOT:%.*]] = icmp eq i32 [[RHSV]], 131073
; LE-NEXT:    ret i1 [[DOTNOT]]
;
; BE-LABEL: @memcmp_4bytes_unaligned_constant_i16(
; BE-NEXT:    [[RHSV:%.*]] = load i32, ptr [[X:%.*]], align 4
; BE-NEXT:    [[DOTNOT:%.*]] = icmp eq i32 [[RHSV]], 65538
; BE-NEXT:    ret i1 [[DOTNOT]]
;
  %call = tail call i32 @memcmp(ptr @intbuf_unaligned, ptr %x, i64 4)
  %cmpeq0 = icmp eq i32 %call, 0
  ret i1 %cmpeq0
}

; Verify that a memcmp call where all arguments are constants is constant
; folded even for arrays with element types other than i8.
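; The call compares the first 3 bytes of the i32 1 element against the i32 0
; element; those bytes match only under a big-endian layout, so the result is
; false for LE and true for BE.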

@intbuf = private unnamed_addr constant [2 x i32] [i32 0, i32 1], align 4

define i1 @memcmp_3bytes_aligned_constant_i32(ptr align 4 %x) {
; LE-LABEL: @memcmp_3bytes_aligned_constant_i32(
; LE-NEXT:    ret i1 false
;
; BE-LABEL: @memcmp_3bytes_aligned_constant_i32(
; BE-NEXT:    ret i1 true
;
  %call = tail call i32 @memcmp(ptr getelementptr inbounds ([2 x i32], ptr @intbuf, i64 0, i64 1), ptr @intbuf, i64 3)
  %cmpeq0 = icmp eq i32 %call, 0
  ret i1 %cmpeq0
}

; A sloppy implementation would infinite loop by recreating the unused instructions.

define i1 @memcmp_4bytes_one_unaligned_i8(ptr align 4 %x, ptr align 1 %y) {
; ALL-LABEL: @memcmp_4bytes_one_unaligned_i8(
; ALL-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(ptr noundef nonnull dereferenceable(4) [[X:%.*]], ptr noundef nonnull dereferenceable(4) [[Y:%.*]], i64 4)
; ALL-NEXT:    [[CMPEQ0:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT:    ret i1 [[CMPEQ0]]
;
  %lhsv = load i32, ptr %x
  %call = tail call i32 @memcmp(ptr %x, ptr %y, i64 4)
  %cmpeq0 = icmp eq i32 %call, 0
  ret i1 %cmpeq0
}