; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
;
; Verify that memcmp calls with past-the-end pointers don't cause trouble
; and are optimally folded.

declare i32 @memcmp(ptr, ptr, i64)


@a5 = constant [5 x i8] c"12345";


; Fold memcmp(a5, a5 + 5, n) to 0 on the assumption that n is 0 otherwise
; the call would be undefined.

define i32 @fold_memcmp_a5_a5p5_n(i64 %n) {
; CHECK-LABEL: @fold_memcmp_a5_a5p5_n(
; CHECK-NEXT:    ret i32 0
;
  %pa5_p5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
  %cmp = call i32 @memcmp(ptr @a5, ptr %pa5_p5, i64 %n)
  ret i32 %cmp
}


; Same as above but for memcmp(a5 + 5, a5 + 5, n).

define i32 @fold_memcmp_a5p5_a5p5_n(i64 %n) {
; CHECK-LABEL: @fold_memcmp_a5p5_a5p5_n(
; CHECK-NEXT:    ret i32 0
;
  %pa5_p5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
  %qa5_p5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
  %cmp = call i32 @memcmp(ptr %pa5_p5, ptr %qa5_p5, i64 %n)
  ret i32 %cmp
}


; TODO: Likewise, fold memcmp(a5 + i, a5 + 5, n) to 0 on same basis.

define i32 @fold_memcmp_a5pi_a5p5_n(i32 %i, i64 %n) {
; CHECK-LABEL: @fold_memcmp_a5pi_a5p5_n(
; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
; CHECK-NEXT:    [[PA5_PI:%.*]] = getelementptr [5 x i8], ptr @a5, i64 0, i64 [[TMP1]]
; CHECK-NEXT:    [[CMP:%.*]] = call i32 @memcmp(ptr [[PA5_PI]], ptr nonnull getelementptr inbounds nuw (i8, ptr @a5, i64 5), i64 [[N:%.*]])
; CHECK-NEXT:    ret i32 [[CMP]]
;
  %pa5_pi = getelementptr [5 x i8], ptr @a5, i32 0, i32 %i
  %pa5_p5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
  %cmp = call i32 @memcmp(ptr %pa5_pi, ptr %pa5_p5, i64 %n)
  ret i32 %cmp
}