// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
/// Check that the alignment builtins handle array-to-pointer decay
// RUN: %clang_cc1 -triple=x86_64-unknown-unknown -o - -emit-llvm %s | FileCheck %s

extern int func(char *c);

// CHECK-LABEL: @test_array(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[BUF:%.*]] = alloca [1024 x i8], align 16
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 44
// CHECK-NEXT:    [[INTPTR:%.*]] = ptrtoint i8* [[ARRAYIDX]] to i64
// CHECK-NEXT:    [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], -16
// CHECK-NEXT:    [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
// CHECK-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX]], i64 [[DIFF]]
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 16) ]
// CHECK-NEXT:    [[CALL:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT]])
// CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 22
// CHECK-NEXT:    [[INTPTR2:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK-NEXT:    [[OVER_BOUNDARY:%.*]] = add i64 [[INTPTR2]], 31
// CHECK-NEXT:    [[ALIGNED_INTPTR4:%.*]] = and i64 [[OVER_BOUNDARY]], -32
// CHECK-NEXT:    [[DIFF5:%.*]] = sub i64 [[ALIGNED_INTPTR4]], [[INTPTR2]]
// CHECK-NEXT:    [[ALIGNED_RESULT6:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX1]], i64 [[DIFF5]]
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT6]], i64 32) ]
// CHECK-NEXT:    [[CALL7:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
// CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 16
// CHECK-NEXT:    [[SRC_ADDR:%.*]] = ptrtoint i8* [[ARRAYIDX8]] to i64
// CHECK-NEXT:    [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], 63
// CHECK-NEXT:    [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
// CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[IS_ALIGNED]] to i32
// CHECK-NEXT:    ret i32 [[CONV]]
//
int test_array(void) {
  char buf[1024];
  func(__builtin_align_down(&buf[44], 16));
  func(__builtin_align_up(&buf[22], 32));
  return __builtin_is_aligned(&buf[16], 64);
}

// CHECK-LABEL: @test_array_should_not_mask(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[BUF:%.*]] = alloca [1024 x i8], align 32
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 64
// CHECK-NEXT:    [[INTPTR:%.*]] = ptrtoint i8* [[ARRAYIDX]] to i64
// CHECK-NEXT:    [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], -16
// CHECK-NEXT:    [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
// CHECK-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX]], i64 [[DIFF]]
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 16) ]
// CHECK-NEXT:    [[CALL:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT]])
// CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 32
// CHECK-NEXT:    [[INTPTR2:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK-NEXT:    [[OVER_BOUNDARY:%.*]] = add i64 [[INTPTR2]], 31
// CHECK-NEXT:    [[ALIGNED_INTPTR4:%.*]] = and i64 [[OVER_BOUNDARY]], -32
// CHECK-NEXT:    [[DIFF5:%.*]] = sub i64 [[ALIGNED_INTPTR4]], [[INTPTR2]]
// CHECK-NEXT:    [[ALIGNED_RESULT6:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX1]], i64 [[DIFF5]]
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT6]], i64 32) ]
// CHECK-NEXT:    [[CALL7:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
// CHECK-NEXT:    ret i32 1
//
int test_array_should_not_mask(void) {
  _Alignas(32) char buf[1024];
  // TODO: The align_up and align_down calls should be folded to no-ops
  func(__builtin_align_down(&buf[64], 16));
  func(__builtin_align_up(&buf[32], 32));
  // This expression can be constant-evaluated:
  return __builtin_is_aligned(&buf[64], 32);
}
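
/// The sketch below is illustrative only and is not exercised by the CHECK
/// lines above: it spells out, in plain C, the integer arithmetic that the
/// generated IR performs for each builtin, assuming the alignment argument is
/// a power of two. The sketch_* helper names are hypothetical.
static __UINTPTR_TYPE__ sketch_align_down(__UINTPTR_TYPE__ addr, __UINTPTR_TYPE__ align) {
  // Clear the low bits, as in `and i64 [[INTPTR]], -16` (note -16 == ~15).
  return addr & ~(align - 1);
}
static __UINTPTR_TYPE__ sketch_align_up(__UINTPTR_TYPE__ addr, __UINTPTR_TYPE__ align) {
  // Bump past the boundary, then mask: `add i64 ..., 31` followed by
  // `and i64 ..., -32` in the IR above.
  return (addr + align - 1) & ~(align - 1);
}
static int sketch_is_aligned(__UINTPTR_TYPE__ addr, __UINTPTR_TYPE__ align) {
  // Test the low bits: `and i64 ..., 63` then `icmp eq i64 ..., 0`.
  return (addr & (align - 1)) == 0;
}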