; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=lower-constant-intrinsics -S < %s | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"

declare i64 @llvm.objectsize.i64(ptr, i1, i1, i1) nounwind readonly
declare i64 @llvm.objectsize.i64.p1(ptr addrspace(1), i1, i1, i1) nounwind readonly
declare void @llvm.trap() nounwind
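
; A brief reminder of the intrinsic's operands for readers of the tests below,
; summarized from the LLVM LangRef (the tests only rely on the calls as written):
;   declare i64 @llvm.objectsize.i64(ptr <object>, i1 <min>, i1 <nullunknown>, i1 <dynamic>)
; - <min>:         if true, return a conservative minimum (0 when the size is
;                  unknown); if false, return a conservative maximum (-1 when
;                  unknown).
; - <nullunknown>: if true, a null pointer is treated as an object of unknown
;                  size; if false, null in address space 0 is known to have 0
;                  bytes available.
; - <dynamic>:     if true, the result may be computed at runtime; if false,
;                  only a compile-time constant result is acceptable.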

; objectsize should fold to a constant, which causes the branch to fold to an
; uncond branch.
define i32 @test1(ptr %ptr) nounwind ssp noredzone align 2 {
; CHECK-LABEL: @test1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[T:%.*]]
; CHECK:       T:
; CHECK-NEXT:    ret i32 4
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false, i1 false, i1 false)
  %1 = icmp ugt i64 %0, 3
  br i1 %1, label %T, label %trap


trap:                                             ; preds = %0, %entry
  tail call void @llvm.trap() noreturn nounwind
  unreachable

T:
  ret i32 4
}

define i64 @test_objectsize_null_flag(ptr %ptr) {
; CHECK-LABEL: @test_objectsize_null_flag(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i64 -1
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64(ptr null, i1 false, i1 true, i1 false)
  ret i64 %0
}

define i64 @test_objectsize_null_flag_min(ptr %ptr) {
; CHECK-LABEL: @test_objectsize_null_flag_min(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i64 0
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64(ptr null, i1 true, i1 true, i1 false)
  ret i64 %0
}

; Test foldable null pointers because we evaluate them with non-exact modes in
; CodeGenPrepare.
define i64 @test_objectsize_null_flag_noas0() {
; CHECK-LABEL: @test_objectsize_null_flag_noas0(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i64 -1
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64.p1(ptr addrspace(1) null, i1 false,
                                             i1 true, i1 false)
  ret i64 %0
}

define i64 @test_objectsize_null_flag_min_noas0() {
; CHECK-LABEL: @test_objectsize_null_flag_min_noas0(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i64 0
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64.p1(ptr addrspace(1) null, i1 true,
                                             i1 true, i1 false)
  ret i64 %0
}

define i64 @test_objectsize_null_known_flag_noas0() {
; CHECK-LABEL: @test_objectsize_null_known_flag_noas0(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i64 -1
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64.p1(ptr addrspace(1) null, i1 false,
                                             i1 false, i1 false)
  ret i64 %0
}

define i64 @test_objectsize_null_known_flag_min_noas0() {
; CHECK-LABEL: @test_objectsize_null_known_flag_min_noas0(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i64 0
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64.p1(ptr addrspace(1) null, i1 true,
                                             i1 false, i1 false)
  ret i64 %0
}

define i64 @test_objectsize_byval_arg(ptr byval([42 x i8]) %ptr) {
; CHECK-LABEL: @test_objectsize_byval_arg(
; CHECK-NEXT:    ret i64 42
;
  %size = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 true, i1 false, i1 false)
  ret i64 %size
}

define i64 @test_objectsize_byref_arg(ptr byref([42 x i8]) %ptr) {
; CHECK-LABEL: @test_objectsize_byref_arg(
; CHECK-NEXT:    ret i64 42
;
  %size = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 true, i1 false, i1 false)
  ret i64 %size
}

; https://llvm.org/PR50023
; The alloca operand type may not match the pointer type size.

define i64 @vla_pointer_size_mismatch(i42 %x) {
; CHECK-LABEL: @vla_pointer_size_mismatch(
; CHECK-NEXT:    [[TMP1:%.*]] = zext i42 [[X:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 1, [[TMP1]]
; CHECK-NEXT:    [[A:%.*]] = alloca i8, i42 [[X]], align 1
; CHECK-NEXT:    [[G1:%.*]] = getelementptr i8, ptr [[A]], i8 17
; CHECK-NEXT:    [[TMP3:%.*]] = sub i64 [[TMP2]], 17
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP2]], 17
; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP5]], -1
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP6]])
; CHECK-NEXT:    ret i64 [[TMP5]]
;
  %A = alloca i8, i42 %x, align 1
  %G1 = getelementptr i8, ptr %A, i8 17
  %objsize = call i64 @llvm.objectsize.i64(ptr %G1, i1 false, i1 true, i1 true)
  ret i64 %objsize
}

declare ptr @malloc(i64) allocsize(0)

define i64 @test_objectsize_malloc() {
; CHECK-LABEL: @test_objectsize_malloc(
; CHECK-NEXT:    [[PTR:%.*]] = call ptr @malloc(i64 16)
; CHECK-NEXT:    ret i64 16
;
  %ptr = call ptr @malloc(i64 16)
  %objsize = call i64 @llvm.objectsize.i64(ptr %ptr, i1 false, i1 true, i1 true)
  ret i64 %objsize
}

@gv_weak = weak global i64 zeroinitializer, align 16

define i32 @promote_with_objectsize_min_false() {
; CHECK-LABEL: @promote_with_objectsize_min_false(
; CHECK-NEXT:    ret i32 -1
;
  %size = call i32 @llvm.objectsize.i32.p0(ptr @gv_weak, i1 false, i1 false, i1 false)
  ret i32 %size
}

define i32 @promote_with_objectsize_min_true() {
; CHECK-LABEL: @promote_with_objectsize_min_true(
; CHECK-NEXT:    ret i32 8
;
  %size = call i32 @llvm.objectsize.i32.p0(ptr @gv_weak, i1 true, i1 false, i1 false)
  ret i32 %size
}

@gv_extern = extern_weak global i64, align 16

define i32 @promote_with_objectsize_nullunknown_false() {
; CHECK-LABEL: @promote_with_objectsize_nullunknown_false(
; CHECK-NEXT:    ret i32 0
;
  %size = call i32 @llvm.objectsize.i32.p0(ptr @gv_extern, i1 true, i1 false, i1 false)
  ret i32 %size
}

define i32 @promote_with_objectsize_nullunknown_true() {
; CHECK-LABEL: @promote_with_objectsize_nullunknown_true(
; CHECK-NEXT:    ret i32 0
;
  %size = call i32 @llvm.objectsize.i32.p0(ptr @gv_extern, i1 true, i1 true, i1 false)
  ret i32 %size
}

define i64 @out_of_bound_gep() {
; CHECK-LABEL: @out_of_bound_gep(
; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8, i32 4, align 1
; CHECK-NEXT:    [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i8 8
; CHECK-NEXT:    ret i64 0
;
  %obj = alloca i8, i32 4
  %slide = getelementptr i8, ptr %obj, i8 8
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

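; A short worked example for @wrapping_gep below: GEP offsets wrap modulo 2^64,
; so 9223372036854775809 (2^63 + 1) plus 9223372036854775808 (2^63) yields a
; net offset of 1 byte into the 4-byte alloca, leaving 3 bytes. The negative
; constants in the CHECK lines are the same offsets printed as signed i64.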
define i64 @wrapping_gep(i1 %c) {
; CHECK-LABEL: @wrapping_gep(
; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8, i64 4, align 1
; CHECK-NEXT:    [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 -9223372036854775807
; CHECK-NEXT:    [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 -9223372036854775808
; CHECK-NEXT:    ret i64 3
;
  %obj = alloca i8, i64 4
  %slide = getelementptr i8, ptr %obj, i64 9223372036854775809
  %slide.bis = getelementptr i8, ptr %slide, i64 9223372036854775808
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide.bis, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

define i64 @wrapping_gep_neg(i1 %c) {
; CHECK-LABEL: @wrapping_gep_neg(
; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8, i64 4, align 1
; CHECK-NEXT:    [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807
; CHECK-NEXT:    [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 9223372036854775807
; CHECK-NEXT:    ret i64 -1
;
  %obj = alloca i8, i64 4
  %slide = getelementptr i8, ptr %obj, i64 9223372036854775807
  %slide.bis = getelementptr i8, ptr %slide, i64 9223372036854775807
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide.bis, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

define i64 @wrapping_gep_large_alloc(i1 %c) {
; CHECK-LABEL: @wrapping_gep_large_alloc(
; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8, i64 9223372036854775807, align 1
; CHECK-NEXT:    [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807
; CHECK-NEXT:    [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 3
; CHECK-NEXT:    [[SLIDE_TER:%.*]] = getelementptr i8, ptr [[SLIDE_BIS]], i64 -4
; CHECK-NEXT:    ret i64 1
;
  %obj = alloca i8, i64 9223372036854775807
  %slide = getelementptr i8, ptr %obj, i64 9223372036854775807
  %slide.bis = getelementptr i8, ptr %slide, i64 3
  %slide.ter = getelementptr i8, ptr %slide.bis, i64 -4
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide.ter, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

; We don't analyze allocations larger than the platform's ptrdiff_t.
define i64 @large_alloca() {
; CHECK-LABEL: @large_alloca(
; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8, i64 -9223372036854775808, align 1
; CHECK-NEXT:    [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807
; CHECK-NEXT:    ret i64 -1
;
  %obj = alloca i8, i64 9223372036854775808
  %slide = getelementptr i8, ptr %obj, i64 9223372036854775807
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

; We don't analyze allocations larger than the platform's ptrdiff_t.
define i64 @large_malloc() {
; CHECK-LABEL: @large_malloc(
; CHECK-NEXT:    [[OBJ:%.*]] = call ptr @malloc(i64 -9223372036854775808)
; CHECK-NEXT:    [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807
; CHECK-NEXT:    ret i64 -1
;
  %obj = call ptr @malloc(i64 9223372036854775808)
  %slide = getelementptr i8, ptr %obj, i64 9223372036854775807
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

define i64 @out_of_bound_negative_gep(i1 %c) {
; CHECK-LABEL: @out_of_bound_negative_gep(
; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8, i32 4, align 1
; CHECK-NEXT:    [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i8 -8
; CHECK-NEXT:    ret i64 -1
;
  %obj = alloca i8, i32 4
  %slide = getelementptr i8, ptr %obj, i8 -8
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

declare i32 @llvm.objectsize.i32.p0(ptr, i1, i1, i1)