; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s

target datalayout = "e-p:64:64:64-p1:32:32:32-p2:16:16:16-n8:16:32:64"


declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1) nounwind
declare void @llvm.memcpy.p0.p1.i32(ptr, ptr addrspace(1), i32, i1) nounwind
declare void @llvm.memcpy.p0.p2.i32(ptr, ptr addrspace(2), i32, i1) nounwind


; A pair of addrspacecasts through an intermediate address space is folded
; into a single direct cast (AS1 -> AS3 -> AS0 becomes AS1 -> AS0).
define ptr @combine_redundant_addrspacecast(ptr addrspace(1) %x) nounwind {
; CHECK-LABEL: @combine_redundant_addrspacecast(
; CHECK-NEXT:    [[Z:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr
; CHECK-NEXT:    ret ptr [[Z]]
;
  %y = addrspacecast ptr addrspace(1) %x to ptr addrspace(3)
  %z = addrspacecast ptr addrspace(3) %y to ptr
  ret ptr %z
}

; Same fold applies elementwise to vectors of pointers.
define <4 x ptr> @combine_redundant_addrspacecast_vector(<4 x ptr addrspace(1)> %x) nounwind {
; CHECK-LABEL: @combine_redundant_addrspacecast_vector(
; CHECK-NEXT:    [[Z:%.*]] = addrspacecast <4 x ptr addrspace(1)> [[X:%.*]] to <4 x ptr>
; CHECK-NEXT:    ret <4 x ptr> [[Z]]
;
  %y = addrspacecast <4 x ptr addrspace(1)> %x to <4 x ptr addrspace(3)>
  %z = addrspacecast <4 x ptr addrspace(3)> %y to <4 x ptr>
  ret <4 x ptr> %z
}

define ptr @combine_redundant_addrspacecast_types(ptr addrspace(1) %x) nounwind {
; CHECK-LABEL: @combine_redundant_addrspacecast_types(
; CHECK-NEXT:    [[Z:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr
; CHECK-NEXT:    ret ptr [[Z]]
;
  %y = addrspacecast ptr addrspace(1) %x to ptr addrspace(3)
  %z = addrspacecast ptr addrspace(3) %y to ptr
  ret ptr %z
}

define <4 x ptr> @combine_redundant_addrspacecast_types_vector(<4 x ptr addrspace(1)> %x) nounwind {
; CHECK-LABEL: @combine_redundant_addrspacecast_types_vector(
; CHECK-NEXT:    [[Z:%.*]] = addrspacecast <4 x ptr addrspace(1)> [[X:%.*]] to <4 x ptr>
; CHECK-NEXT:    ret <4 x ptr> [[Z]]
;
  %y = addrspacecast <4 x ptr addrspace(1)> %x to <4 x ptr addrspace(3)>
  %z = addrspacecast <4 x ptr addrspace(3)> %y to <4 x ptr>
  ret <4 x ptr> %z
}

; With opaque pointers a single AS1 -> AS2 cast is already canonical; these
; check it is left as one addrspacecast.
define ptr addrspace(2) @combine_addrspacecast_bitcast_1(ptr addrspace(1) %x) nounwind {
; CHECK-LABEL: @combine_addrspacecast_bitcast_1(
; CHECK-NEXT:    [[Y:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr addrspace(2)
; CHECK-NEXT:    ret ptr addrspace(2) [[Y]]
;
  %y = addrspacecast ptr addrspace(1) %x to ptr addrspace(2)
  ret ptr addrspace(2) %y
}

define ptr addrspace(2) @combine_addrspacecast_bitcast_2(ptr addrspace(1) %x) nounwind {
; CHECK-LABEL: @combine_addrspacecast_bitcast_2(
; CHECK-NEXT:    [[Y:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr addrspace(2)
; CHECK-NEXT:    ret ptr addrspace(2) [[Y]]
;
  %y = addrspacecast ptr addrspace(1) %x to ptr addrspace(2)
  ret ptr addrspace(2) %y
}

define ptr addrspace(2) @combine_bitcast_addrspacecast_1(ptr addrspace(1) %x) nounwind {
; CHECK-LABEL: @combine_bitcast_addrspacecast_1(
; CHECK-NEXT:    [[Z:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr addrspace(2)
; CHECK-NEXT:    ret ptr addrspace(2) [[Z]]
;
  %z = addrspacecast ptr addrspace(1) %x to ptr addrspace(2)
  ret ptr addrspace(2) %z
}

define ptr addrspace(2) @combine_bitcast_addrspacecast_2(ptr addrspace(1) %x) nounwind {
; CHECK-LABEL: @combine_bitcast_addrspacecast_2(
; CHECK-NEXT:    [[Z:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr addrspace(2)
; CHECK-NEXT:    ret ptr addrspace(2) [[Z]]
;
  %z = addrspacecast ptr addrspace(1) %x to ptr addrspace(2)
  ret ptr addrspace(2) %z
}

define ptr addrspace(2) @combine_addrspacecast_types(ptr addrspace(1) %x) nounwind {
; CHECK-LABEL: @combine_addrspacecast_types(
; CHECK-NEXT:    [[Y:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr addrspace(2)
; CHECK-NEXT:    ret ptr addrspace(2) [[Y]]
;
  %y = addrspacecast ptr addrspace(1) %x to ptr addrspace(2)
  ret ptr addrspace(2) %y
}

define <4 x ptr addrspace(2)> @combine_addrspacecast_types_vector(<4 x ptr addrspace(1)> %x) nounwind {
; CHECK-LABEL: @combine_addrspacecast_types_vector(
; CHECK-NEXT:    [[Y:%.*]] = addrspacecast <4 x ptr addrspace(1)> [[X:%.*]] to <4 x ptr addrspace(2)>
; CHECK-NEXT:    ret <4 x ptr addrspace(2)> [[Y]]
;
  %y = addrspacecast <4 x ptr addrspace(1)> %x to <4 x ptr addrspace(2)>
  ret <4 x ptr addrspace(2)> %y
}

define <vscale x 4 x ptr addrspace(2)> @combine_addrspacecast_types_scalevector(<vscale x 4 x ptr addrspace(1)> %x) nounwind {
; CHECK-LABEL: @combine_addrspacecast_types_scalevector(
; CHECK-NEXT:    [[Y:%.*]] = addrspacecast <vscale x 4 x ptr addrspace(1)> [[X:%.*]] to <vscale x 4 x ptr addrspace(2)>
; CHECK-NEXT:    ret <vscale x 4 x ptr addrspace(2)> [[Y]]
;
  %y = addrspacecast <vscale x 4 x ptr addrspace(1)> %x to <vscale x 4 x ptr addrspace(2)>
  ret <vscale x 4 x ptr addrspace(2)> %y
}


define i32 @canonicalize_addrspacecast(ptr addrspace(1) %arr) {
; CHECK-LABEL: @canonicalize_addrspacecast(
; CHECK-NEXT:    [[P:%.*]] = addrspacecast ptr addrspace(1) [[ARR:%.*]] to ptr
; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT:    ret i32 [[V]]
;
  %p = addrspacecast ptr addrspace(1) %arr to ptr
  %v = load i32, ptr %p
  ret i32 %v
}

@const_array = addrspace(2) constant [60 x i8] [i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
  i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
  i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
  i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
  i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22 ]

declare void @foo(ptr) nounwind

; A copy from a constant addrspacecast'ed global: the memcpy from the
; addrspacecast'ed @const_array into the alloca is forwarded, so the loop
; loads directly from the AS2 global and the alloca disappears.
define i32 @memcpy_addrspacecast() nounwind {
; CHECK-LABEL: @memcpy_addrspacecast(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP_BODY:%.*]]
; CHECK:       loop.body:
; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[I_INC:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_INC:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[I]] to i16
; CHECK-NEXT:    [[PTR:%.*]] = getelementptr i8, ptr addrspace(2) getelementptr inbounds nuw (i8, ptr addrspace(2) @const_array, i16 4), i16 [[TMP0]]
; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(2) [[PTR]], align 1
; CHECK-NEXT:    [[EXT:%.*]] = zext i8 [[LOAD]] to i32
; CHECK-NEXT:    [[SUM_INC]] = add i32 [[SUM]], [[EXT]]
; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp eq i32 [[I]], 48
; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[LOOP_BODY]]
; CHECK:       end:
; CHECK-NEXT:    ret i32 [[SUM_INC]]
;
entry:
  %alloca = alloca i8, i32 48
  call void @llvm.memcpy.p0.p1.i32(ptr align 4 %alloca, ptr addrspace(1) align 4 addrspacecast (ptr addrspace(2) getelementptr inbounds ([60 x i8], ptr addrspace(2) @const_array, i16 0, i16 4) to ptr addrspace(1)), i32 48, i1 false) nounwind
  br label %loop.body

loop.body:
  %i = phi i32 [ 0, %entry ], [ %i.inc, %loop.body ]
  %sum = phi i32 [ 0, %entry ], [ %sum.inc, %loop.body]
  %ptr = getelementptr i8, ptr %alloca, i32 %i
  %load = load i8, ptr %ptr
  %ext = zext i8 %load to i32
  %sum.inc = add i32 %sum, %ext
  %i.inc = add i32 %i, 1
  %cmp = icmp ne i32 %i, 48
  br i1 %cmp, label %loop.body, label %end

end:
  ret i32 %sum.inc
}

; An addrspacecast of null is NOT folded to null: null in one address space
; need not map to null in another, so the cast expression is kept as-is.
define void @constant_fold_null() #0 {
; CHECK-LABEL: @constant_fold_null(
; CHECK-NEXT:    store i32 7, ptr addrspace(4) addrspacecast (ptr addrspace(3) null to ptr addrspace(4)), align 4
; CHECK-NEXT:    ret void
;
  %cast = addrspacecast ptr addrspace(3) null to ptr addrspace(4)
  store i32 7, ptr addrspace(4) %cast
  ret void
}

; addrspacecast of undef does fold to undef in the destination space.
define ptr addrspace(4) @constant_fold_undef() #0 {
; CHECK-LABEL: @constant_fold_undef(
; CHECK-NEXT:    ret ptr addrspace(4) undef
;
  %cast = addrspacecast ptr addrspace(3) undef to ptr addrspace(4)
  ret ptr addrspace(4) %cast
}

define <4 x ptr addrspace(4)> @constant_fold_null_vector() #0 {
; CHECK-LABEL: @constant_fold_null_vector(
; CHECK-NEXT:    ret <4 x ptr addrspace(4)> addrspacecast (<4 x ptr addrspace(3)> zeroinitializer to <4 x ptr addrspace(4)>)
;
  %cast = addrspacecast <4 x ptr addrspace(3)> zeroinitializer to <4 x ptr addrspace(4)>
  ret <4 x ptr addrspace(4)> %cast
}

define void @constant_fold_inttoptr() #0 {
; CHECK-LABEL: @constant_fold_inttoptr(
; CHECK-NEXT:    store i32 7, ptr addrspace(4) addrspacecast (ptr addrspace(3) inttoptr (i32 -1 to ptr addrspace(3)) to ptr addrspace(4)), align 4
; CHECK-NEXT:    ret void
;
  %cast = addrspacecast ptr addrspace(3) inttoptr (i32 -1 to ptr addrspace(3)) to ptr addrspace(4)
  store i32 7, ptr addrspace(4) %cast
  ret void
}

; The gep is constant-folded into the inttoptr (1234 + 10*4 = 1274) before
; the addrspacecast expression is formed.
define void @constant_fold_gep_inttoptr() #0 {
; CHECK-LABEL: @constant_fold_gep_inttoptr(
; CHECK-NEXT:    store i32 7, ptr addrspace(4) addrspacecast (ptr addrspace(3) inttoptr (i64 1274 to ptr addrspace(3)) to ptr addrspace(4)), align 4
; CHECK-NEXT:    ret void
;
  %k = inttoptr i32 1234 to ptr addrspace(3)
  %gep = getelementptr i32, ptr addrspace(3) %k, i32 10
  %cast = addrspacecast ptr addrspace(3) %gep to ptr addrspace(4)
  store i32 7, ptr addrspace(4) %cast
  ret void
}