; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=+promote-alloca -disable-promote-alloca-to-vector -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -march=amdgcn < %s | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=+promote-alloca -disable-promote-alloca-to-vector -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE %s
; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri -mattr=-unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA %s
; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=+promote-alloca -disable-promote-alloca-to-vector -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE-VECT -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -show-mc-encoding --amdhsa-code-object-version=2 -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC %s

; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -data-layout=A5 -mcpu=kaveri -passes=amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck -enable-var-scope -check-prefix=HSAOPT -check-prefix=OPT %s
; RUN: opt -S -mtriple=amdgcn-unknown-unknown -data-layout=A5 -mcpu=kaveri -passes=amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck -enable-var-scope -check-prefix=NOHSAOPT -check-prefix=OPT %s

; RUN: llc -march=r600 -mcpu=cypress -disable-promote-alloca-to-vector < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck %s -check-prefix=R600-VECT -check-prefix=FUNC

; HSAOPT: @mova_same_clause.stack = internal unnamed_addr addrspace(3) global [256 x [5 x i32]] poison, align 4
; HSAOPT: @high_alignment.stack = internal unnamed_addr addrspace(3) global [256 x [8 x i32]] poison, align 16


; FUNC-LABEL: {{^}}mova_same_clause:
; OPT-LABEL: @mova_same_clause(

; R600: LDS_WRITE
; R600: LDS_WRITE
; R600: LDS_READ
; R600: LDS_READ

; HSA-PROMOTE: .amd_kernel_code_t
; HSA-PROMOTE: workgroup_group_segment_byte_size = 5120
; HSA-PROMOTE: .end_amd_kernel_code_t
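; (5120 bytes of LDS = 256 work-items, the maximum flat workgroup size, times
; the 20-byte [5 x i32] stack object promoted for each work-item.)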

; HSA-PROMOTE: s_load_dwordx2 s[{{[0-9:]+}}], s[4:5], 0x1

; SI-PROMOTE: ds_write_b32
; SI-PROMOTE: ds_write_b32
; SI-PROMOTE: ds_read_b32
; SI-PROMOTE: ds_read_b32

; HSA-ALLOCA: .amd_kernel_code_t
; FIXME: Creating the emergency stack slots causes us to over-estimate scratch
; by 4 bytes.
; HSA-ALLOCA: workitem_private_segment_byte_size = 24
; HSA-ALLOCA: .end_amd_kernel_code_t

; HSA-ALLOCA: s_mov_b32 flat_scratch_lo, s7
; HSA-ALLOCA: s_add_i32 s6, s6, s9
; HSA-ALLOCA: s_lshr_b32 flat_scratch_hi, s6, 8

; SI-ALLOCA: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen ; encoding: [0x00,0x10,0x70,0xe0
; SI-ALLOCA: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen ; encoding: [0x00,0x10,0x70,0xe0


; HSAOPT: [[DISPATCH_PTR:%[0-9]+]] = call noalias nonnull dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
; HSAOPT: [[CAST_DISPATCH_PTR:%[0-9]+]] = bitcast i8 addrspace(4)* [[DISPATCH_PTR]] to i32 addrspace(4)*
; HSAOPT: [[GEP0:%[0-9]+]] = getelementptr inbounds i32, i32 addrspace(4)* [[CAST_DISPATCH_PTR]], i64 1
; HSAOPT: [[LDXY:%[0-9]+]] = load i32, i32 addrspace(4)* [[GEP0]], align 4, !invariant.load !0
; HSAOPT: [[GEP1:%[0-9]+]] = getelementptr inbounds i32, i32 addrspace(4)* [[CAST_DISPATCH_PTR]], i64 2
; HSAOPT: [[LDZU:%[0-9]+]] = load i32, i32 addrspace(4)* [[GEP1]], align 4, !range !1, !invariant.load !0
; HSAOPT: [[EXTRACTY:%[0-9]+]] = lshr i32 [[LDXY]], 16

; HSAOPT: [[WORKITEM_ID_X:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.x(), !range !2
; HSAOPT: [[WORKITEM_ID_Y:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.y(), !range !2
; HSAOPT: [[WORKITEM_ID_Z:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.z(), !range !2

; HSAOPT: [[Y_SIZE_X_Z_SIZE:%[0-9]+]] = mul nuw nsw i32 [[EXTRACTY]], [[LDZU]]
; HSAOPT: [[YZ_X_XID:%[0-9]+]] = mul i32 [[Y_SIZE_X_Z_SIZE]], [[WORKITEM_ID_X]]
; HSAOPT: [[Y_X_Z_SIZE:%[0-9]+]] = mul nuw nsw i32 [[WORKITEM_ID_Y]], [[LDZU]]
; HSAOPT: [[ADD_YZ_X_X_YZ_SIZE:%[0-9]+]] = add i32 [[YZ_X_XID]], [[Y_X_Z_SIZE]]
; HSAOPT: [[ADD_ZID:%[0-9]+]] = add i32 [[ADD_YZ_X_X_YZ_SIZE]], [[WORKITEM_ID_Z]]

; HSAOPT: [[LOCAL_GEP:%[0-9]+]] = getelementptr inbounds [256 x [5 x i32]], [256 x [5 x i32]] addrspace(3)* @mova_same_clause.stack, i32 0, i32 [[ADD_ZID]]
; HSAOPT: %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 {{%[0-9]+}}
; HSAOPT: %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 {{%[0-9]+}}
; HSAOPT: %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 0
; HSAOPT: %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 1
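; The checks above show the promoted stack object being indexed by the
; flattened workitem id, i.e. id.x * (size.y * size.z) + id.y * size.z + id.z,
; with the workgroup dimensions read from the dispatch packet.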


; NOHSAOPT: call i32 @llvm.r600.read.local.size.y(), !range !0
; NOHSAOPT: call i32 @llvm.r600.read.local.size.z(), !range !0
; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.x(), !range !1
; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.y(), !range !1
; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.z(), !range !1
define amdgpu_kernel void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
  %stack = alloca [5 x i32], align 4, addrspace(5)
  %0 = load i32, i32 addrspace(1)* %in, align 4
  %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 %0
  store i32 4, i32 addrspace(5)* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 %1
  store i32 5, i32 addrspace(5)* %arrayidx3, align 4
  %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 0
  %2 = load i32, i32 addrspace(5)* %arrayidx10, align 4
  store i32 %2, i32 addrspace(1)* %out, align 4
  %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 1
  %3 = load i32, i32 addrspace(5)* %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
  store i32 %3, i32 addrspace(1)* %arrayidx13
  ret void
}

; OPT-LABEL: @high_alignment(
; OPT: getelementptr inbounds [256 x [8 x i32]], [256 x [8 x i32]] addrspace(3)* @high_alignment.stack, i32 0, i32 %{{[0-9]+}}
define amdgpu_kernel void @high_alignment(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
  %stack = alloca [8 x i32], align 16, addrspace(5)
  %0 = load i32, i32 addrspace(1)* %in, align 4
  %arrayidx1 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %stack, i32 0, i32 %0
  store i32 4, i32 addrspace(5)* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %stack, i32 0, i32 %1
  store i32 5, i32 addrspace(5)* %arrayidx3, align 4
  %arrayidx10 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %stack, i32 0, i32 0
  %2 = load i32, i32 addrspace(5)* %arrayidx10, align 4
  store i32 %2, i32 addrspace(1)* %out, align 4
  %arrayidx12 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %stack, i32 0, i32 1
  %3 = load i32, i32 addrspace(5)* %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
  store i32 %3, i32 addrspace(1)* %arrayidx13
  ret void
}

; FUNC-LABEL: {{^}}no_replace_inbounds_gep:
; OPT-LABEL: @no_replace_inbounds_gep(
; OPT: alloca [5 x i32]

; SI-NOT: ds_write
define amdgpu_kernel void @no_replace_inbounds_gep(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
  %stack = alloca [5 x i32], align 4, addrspace(5)
  %0 = load i32, i32 addrspace(1)* %in, align 4
  %arrayidx1 = getelementptr [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 %0
  store i32 4, i32 addrspace(5)* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 %1
  store i32 5, i32 addrspace(5)* %arrayidx3, align 4
  %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 0
  %2 = load i32, i32 addrspace(5)* %arrayidx10, align 4
  store i32 %2, i32 addrspace(1)* %out, align 4
  %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %stack, i32 0, i32 1
  %3 = load i32, i32 addrspace(5)* %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
  store i32 %3, i32 addrspace(1)* %arrayidx13
  ret void
}

; This test checks that the stack offset is calculated correctly for structs.
; All register loads/stores should be optimized away, so there shouldn't be
; any MOVA instructions.
;
; XXX: This generated code has unnecessary MOVs, we should be able to optimize
; this.

; FUNC-LABEL: {{^}}multiple_structs:
; OPT-LABEL: @multiple_structs(

; R600-NOT: MOVA_INT
; SI-NOT: v_movrel
; SI-NOT: v_movrel
%struct.point = type { i32, i32 }

define amdgpu_kernel void @multiple_structs(i32 addrspace(1)* %out) #0 {
entry:
  %a = alloca %struct.point, addrspace(5)
  %b = alloca %struct.point, addrspace(5)
  %a.x.ptr = getelementptr %struct.point, %struct.point addrspace(5)* %a, i32 0, i32 0
  %a.y.ptr = getelementptr %struct.point, %struct.point addrspace(5)* %a, i32 0, i32 1
  %b.x.ptr = getelementptr %struct.point, %struct.point addrspace(5)* %b, i32 0, i32 0
  %b.y.ptr = getelementptr %struct.point, %struct.point addrspace(5)* %b, i32 0, i32 1
  store i32 0, i32 addrspace(5)* %a.x.ptr
  store i32 1, i32 addrspace(5)* %a.y.ptr
  store i32 2, i32 addrspace(5)* %b.x.ptr
  store i32 3, i32 addrspace(5)* %b.y.ptr
  %a.indirect.ptr = getelementptr %struct.point, %struct.point addrspace(5)* %a, i32 0, i32 0
  %b.indirect.ptr = getelementptr %struct.point, %struct.point addrspace(5)* %b, i32 0, i32 0
  %a.indirect = load i32, i32 addrspace(5)* %a.indirect.ptr
  %b.indirect = load i32, i32 addrspace(5)* %b.indirect.ptr
  %0 = add i32 %a.indirect, %b.indirect
  store i32 %0, i32 addrspace(1)* %out
  ret void
}

; Test direct access of a private array inside a loop. The private array
; loads and stores should be lowered to copies, so there shouldn't be any
; MOVA instructions.

; FUNC-LABEL: {{^}}direct_loop:
; R600-NOT: MOVA_INT
; SI-NOT: v_movrel

define amdgpu_kernel void @direct_loop(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
entry:
  %prv_array_const = alloca [2 x i32], addrspace(5)
  %prv_array = alloca [2 x i32], addrspace(5)
  %a = load i32, i32 addrspace(1)* %in
  %b_src_ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %b = load i32, i32 addrspace(1)* %b_src_ptr
  %a_dst_ptr = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %prv_array_const, i32 0, i32 0
  store i32 %a, i32 addrspace(5)* %a_dst_ptr
  %b_dst_ptr = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %prv_array_const, i32 0, i32 1
  store i32 %b, i32 addrspace(5)* %b_dst_ptr
  br label %for.body

for.body:
  %inc = phi i32 [0, %entry], [%count, %for.body]
  %x_ptr = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %prv_array_const, i32 0, i32 0
  %x = load i32, i32 addrspace(5)* %x_ptr
  %y_ptr = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %prv_array, i32 0, i32 0
  %y = load i32, i32 addrspace(5)* %y_ptr
  %xy = add i32 %x, %y
  store i32 %xy, i32 addrspace(5)* %y_ptr
  %count = add i32 %inc, 1
  %done = icmp eq i32 %count, 4095
  br i1 %done, label %for.end, label %for.body

for.end:
  %value_ptr = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %prv_array, i32 0, i32 0
  %value = load i32, i32 addrspace(5)* %value_ptr
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}short_array:

; R600-VECT: MOVA_INT

; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:6 ; encoding: [0x06,0x00,0x68,0xe0
; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4 ; encoding: [0x04,0x00,0x68,0xe0
; Loaded value is 0 or 1, so sext will become zext, so we get buffer_load_ushort instead of buffer_load_sshort.
; SI-ALLOCA: buffer_load_sshort v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0

; SI-PROMOTE-VECT: s_load_dword [[IDX:s[0-9]+]]
; SI-PROMOTE-VECT: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 4
; SI-PROMOTE-VECT: s_lshr_b32 [[SREG:s[0-9]+]], 0x10000, [[SCALED_IDX]]
; SI-PROMOTE-VECT: s_and_b32 s{{[0-9]+}}, [[SREG]], 0xffff
define amdgpu_kernel void @short_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %0 = alloca [2 x i16], addrspace(5)
  %1 = getelementptr inbounds [2 x i16], [2 x i16] addrspace(5)* %0, i32 0, i32 0
  %2 = getelementptr inbounds [2 x i16], [2 x i16] addrspace(5)* %0, i32 0, i32 1
  store i16 0, i16 addrspace(5)* %1
  store i16 1, i16 addrspace(5)* %2
  %3 = getelementptr inbounds [2 x i16], [2 x i16] addrspace(5)* %0, i32 0, i32 %index
  %4 = load i16, i16 addrspace(5)* %3
  %5 = sext i16 %4 to i32
  store i32 %5, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}char_array:

; R600-VECT: MOVA_INT

; SI-PROMOTE-VECT-DAG: s_lshl_b32
; SI-PROMOTE-VECT-DAG: v_lshrrev

; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4 ; encoding: [0x04,0x00,0x60,0xe0
; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:5 ; encoding: [0x05,0x00,0x60,0xe0
define amdgpu_kernel void @char_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %0 = alloca [2 x i8], addrspace(5)
  %1 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(5)* %0, i32 0, i32 0
  %2 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(5)* %0, i32 0, i32 1
  store i8 0, i8 addrspace(5)* %1
  store i8 1, i8 addrspace(5)* %2
  %3 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(5)* %0, i32 0, i32 %index
  %4 = load i8, i8 addrspace(5)* %3
  %5 = sext i8 %4 to i32
  store i32 %5, i32 addrspace(1)* %out
  ret void
}

; Test that two stack objects are not stored in the same register
; The second stack object should be in T3.X
; FUNC-LABEL: {{^}}no_overlap:
;
; A total of 5 bytes should be allocated and used.
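; (5 bytes = the [3 x i8] alloca plus the [2 x i8] alloca below.)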
; SI: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4 ;
define amdgpu_kernel void @no_overlap(i32 addrspace(1)* %out, i32 %in) #0 {
entry:
  %0 = alloca [3 x i8], align 1, addrspace(5)
  %1 = alloca [2 x i8], align 1, addrspace(5)
  %2 = getelementptr [3 x i8], [3 x i8] addrspace(5)* %0, i32 0, i32 0
  %3 = getelementptr [3 x i8], [3 x i8] addrspace(5)* %0, i32 0, i32 1
  %4 = getelementptr [3 x i8], [3 x i8] addrspace(5)* %0, i32 0, i32 2
  %5 = getelementptr [2 x i8], [2 x i8] addrspace(5)* %1, i32 0, i32 0
  %6 = getelementptr [2 x i8], [2 x i8] addrspace(5)* %1, i32 0, i32 1
  store i8 0, i8 addrspace(5)* %2
  store i8 1, i8 addrspace(5)* %3
  store i8 2, i8 addrspace(5)* %4
  store i8 1, i8 addrspace(5)* %5
  store i8 0, i8 addrspace(5)* %6
  %7 = getelementptr [3 x i8], [3 x i8] addrspace(5)* %0, i32 0, i32 %in
  %8 = getelementptr [2 x i8], [2 x i8] addrspace(5)* %1, i32 0, i32 %in
  %9 = load i8, i8 addrspace(5)* %7
  %10 = load i8, i8 addrspace(5)* %8
  %11 = add i8 %9, %10
  %12 = sext i8 %11 to i32
  store i32 %12, i32 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @char_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x [2 x i8]], addrspace(5)
  %gep0 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]] addrspace(5)* %alloca, i32 0, i32 0, i32 0
  %gep1 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]] addrspace(5)* %alloca, i32 0, i32 0, i32 1
  store i8 0, i8 addrspace(5)* %gep0
  store i8 1, i8 addrspace(5)* %gep1
  %gep2 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]] addrspace(5)* %alloca, i32 0, i32 0, i32 %index
  %load = load i8, i8 addrspace(5)* %gep2
  %sext = sext i8 %load to i32
  store i32 %sext, i32 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @i32_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x [2 x i32]], addrspace(5)
  %gep0 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]] addrspace(5)* %alloca, i32 0, i32 0, i32 0
  %gep1 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]] addrspace(5)* %alloca, i32 0, i32 0, i32 1
  store i32 0, i32 addrspace(5)* %gep0
  store i32 1, i32 addrspace(5)* %gep1
  %gep2 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]] addrspace(5)* %alloca, i32 0, i32 0, i32 %index
  %load = load i32, i32 addrspace(5)* %gep2
  store i32 %load, i32 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @i64_array_array(i64 addrspace(1)* %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x [2 x i64]], addrspace(5)
  %gep0 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]] addrspace(5)* %alloca, i32 0, i32 0, i32 0
  %gep1 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]] addrspace(5)* %alloca, i32 0, i32 0, i32 1
  store i64 0, i64 addrspace(5)* %gep0
  store i64 1, i64 addrspace(5)* %gep1
  %gep2 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]] addrspace(5)* %alloca, i32 0, i32 0, i32 %index
  %load = load i64, i64 addrspace(5)* %gep2
  store i64 %load, i64 addrspace(1)* %out
  ret void
}

%struct.pair32 = type { i32, i32 }

define amdgpu_kernel void @struct_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x [2 x %struct.pair32]], addrspace(5)
  %gep0 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]] addrspace(5)* %alloca, i32 0, i32 0, i32 0, i32 1
  %gep1 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]] addrspace(5)* %alloca, i32 0, i32 0, i32 1, i32 1
  store i32 0, i32 addrspace(5)* %gep0
  store i32 1, i32 addrspace(5)* %gep1
  %gep2 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]] addrspace(5)* %alloca, i32 0, i32 0, i32 %index, i32 0
  %load = load i32, i32 addrspace(5)* %gep2
  store i32 %load, i32 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @struct_pair32_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x %struct.pair32], addrspace(5)
  %gep0 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32] addrspace(5)* %alloca, i32 0, i32 0, i32 1
  %gep1 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32] addrspace(5)* %alloca, i32 0, i32 1, i32 0
  store i32 0, i32 addrspace(5)* %gep0
  store i32 1, i32 addrspace(5)* %gep1
  %gep2 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32] addrspace(5)* %alloca, i32 0, i32 %index, i32 0
  %load = load i32, i32 addrspace(5)* %gep2
  store i32 %load, i32 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @select_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
  %tmp = alloca [2 x i32], addrspace(5)
  %tmp1 = getelementptr [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 0
  %tmp2 = getelementptr [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 1
  store i32 0, i32 addrspace(5)* %tmp1
  store i32 1, i32 addrspace(5)* %tmp2
  %cmp = icmp eq i32 %in, 0
  %sel = select i1 %cmp, i32 addrspace(5)* %tmp1, i32 addrspace(5)* %tmp2
  %load = load i32, i32 addrspace(5)* %sel
  store i32 %load, i32 addrspace(1)* %out
  ret void
}

; AMDGPUPromoteAlloca does not know how to handle ptrtoint. When it
; finds one, it should stop trying to promote.
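; The alloca should therefore stay in scratch and be accessed with buffer
; instructions rather than ds_* LDS instructions.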

; FUNC-LABEL: ptrtoint:
; SI-NOT: ds_write
; SI: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
; SI: v_add_{{[iu]}}32_e32 [[ADD_OFFSET:v[0-9]+]], vcc, 5,
; SI: buffer_load_dword v{{[0-9]+}}, [[ADD_OFFSET:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0 offen ;
define amdgpu_kernel void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
  %alloca = alloca [16 x i32], addrspace(5)
  %tmp0 = getelementptr [16 x i32], [16 x i32] addrspace(5)* %alloca, i32 0, i32 %a
  store i32 5, i32 addrspace(5)* %tmp0
  %tmp1 = ptrtoint [16 x i32] addrspace(5)* %alloca to i32
  %tmp2 = add i32 %tmp1, 5
  %tmp3 = inttoptr i32 %tmp2 to i32 addrspace(5)*
  %tmp4 = getelementptr i32, i32 addrspace(5)* %tmp3, i32 %b
  %tmp5 = load i32, i32 addrspace(5)* %tmp4
  store i32 %tmp5, i32 addrspace(1)* %out
  ret void
}

; OPT-LABEL: @pointer_typed_alloca(
; OPT: getelementptr inbounds [256 x i32 addrspace(1)*], [256 x i32 addrspace(1)*] addrspace(3)* @pointer_typed_alloca.A.addr, i32 0, i32 %{{[0-9]+}}
; OPT: load i32 addrspace(1)*, i32 addrspace(1)* addrspace(3)* %{{[0-9]+}}, align 4
define amdgpu_kernel void @pointer_typed_alloca(i32 addrspace(1)* %A) #1 {
entry:
  %A.addr = alloca i32 addrspace(1)*, align 4, addrspace(5)
  store i32 addrspace(1)* %A, i32 addrspace(1)* addrspace(5)* %A.addr, align 4
  %ld0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %A.addr, align 4
  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %ld0, i32 0
  store i32 1, i32 addrspace(1)* %arrayidx, align 4
  %ld1 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %A.addr, align 4
  %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %ld1, i32 1
  store i32 2, i32 addrspace(1)* %arrayidx1, align 4
  %ld2 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %A.addr, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %ld2, i32 2
  store i32 3, i32 addrspace(1)* %arrayidx2, align 4
  ret void
}

; FUNC-LABEL: v16i32_stack:

; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT

; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword

define amdgpu_kernel void @v16i32_stack(<16 x i32> addrspace(1)* %out, i32 %a) {
  %alloca = alloca [2 x <16 x i32>], addrspace(5)
  %tmp0 = getelementptr [2 x <16 x i32>], [2 x <16 x i32>] addrspace(5)* %alloca, i32 0, i32 %a
  %tmp5 = load <16 x i32>, <16 x i32> addrspace(5)* %tmp0
  store <16 x i32> %tmp5, <16 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: v16float_stack:

; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT

; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword

define amdgpu_kernel void @v16float_stack(<16 x float> addrspace(1)* %out, i32 %a) {
  %alloca = alloca [2 x <16 x float>], addrspace(5)
  %tmp0 = getelementptr [2 x <16 x float>], [2 x <16 x float>] addrspace(5)* %alloca, i32 0, i32 %a
  %tmp5 = load <16 x float>, <16 x float> addrspace(5)* %tmp0
  store <16 x float> %tmp5, <16 x float> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: v2float_stack:

; R600: MOVA_INT
; R600: MOVA_INT

; SI: buffer_load_dword
; SI: buffer_load_dword

define amdgpu_kernel void @v2float_stack(<2 x float> addrspace(1)* %out, i32 %a) {
  %alloca = alloca [16 x <2 x float>], addrspace(5)
  %tmp0 = getelementptr [16 x <2 x float>], [16 x <2 x float>] addrspace(5)* %alloca, i32 0, i32 %a
  %tmp5 = load <2 x float>, <2 x float> addrspace(5)* %tmp0
  store <2 x float> %tmp5, <2 x float> addrspace(1)* %out
  ret void
}

; OPT-LABEL: @direct_alloca_read_0xi32(
; OPT: store [0 x i32] undef, [0 x i32] addrspace(3)*
; OPT: load [0 x i32], [0 x i32] addrspace(3)*
define amdgpu_kernel void @direct_alloca_read_0xi32([0 x i32] addrspace(1)* %out, i32 %index) {
entry:
  %tmp = alloca [0 x i32], addrspace(5)
  store [0 x i32] [], [0 x i32] addrspace(5)* %tmp
  %load = load [0 x i32], [0 x i32] addrspace(5)* %tmp
  store [0 x i32] %load, [0 x i32] addrspace(1)* %out
  ret void
}

; OPT-LABEL: @direct_alloca_read_1xi32(
; OPT: store [1 x i32] zeroinitializer, [1 x i32] addrspace(3)*
; OPT: load [1 x i32], [1 x i32] addrspace(3)*
define amdgpu_kernel void @direct_alloca_read_1xi32([1 x i32] addrspace(1)* %out, i32 %index) {
entry:
  %tmp = alloca [1 x i32], addrspace(5)
  store [1 x i32] [i32 0], [1 x i32] addrspace(5)* %tmp
  %load = load [1 x i32], [1 x i32] addrspace(5)* %tmp
  store [1 x i32] %load, [1 x i32] addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind "amdgpu-waves-per-eu"="1,2" "amdgpu-flat-work-group-size"="1,256" }
attributes #1 = { nounwind "amdgpu-flat-work-group-size"="1,256" }

; HSAOPT: !0 = !{}
; HSAOPT: !1 = !{i32 0, i32 257}
; HSAOPT: !2 = !{i32 0, i32 256}

; NOHSAOPT: !0 = !{i32 0, i32 257}
; NOHSAOPT: !1 = !{i32 0, i32 256}
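; The !range values presumably follow from the 1,256 flat workgroup size above:
; workgroup-size loads fall in [0, 257) and workitem ids in [0, 256).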