; xref: /llvm-project/llvm/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions.ll (revision d10b76552f919ddb84347ab03908a55804ea6b8a)
; RUN: opt -data-layout=A5 -S -mtriple=amdgcn-amd-amdhsa -passes=infer-address-spaces %s | FileCheck %s

; Regression tests from old HSAIL addrspacecast optimization pass

; Lookup table of 100 doubles (0.0 .. 9.9 in 0.1 steps) in the global address space.
@data = internal addrspace(1) global [100 x double] [double 0.00, double 1.000000e-01, double 2.000000e-01, double 3.000000e-01, double 4.000000e-01, double 5.000000e-01, double 6.000000e-01, double 7.000000e-01, double 8.000000e-01, double 9.000000e-01, double 1.00, double 1.10, double 1.20, double 1.30, double 1.40, double 1.50, double 1.60, double 1.70, double 1.80, double 1.90, double 2.00, double 2.10, double 2.20, double 2.30, double 2.40, double 2.50, double 2.60, double 2.70, double 2.80, double 2.90, double 3.00, double 3.10, double 3.20, double 3.30, double 3.40, double 3.50, double 3.60, double 3.70, double 3.80, double 3.90, double 4.00, double 4.10, double 4.20, double 4.30, double 4.40, double 4.50, double 4.60, double 4.70, double 4.80, double 4.90, double 5.00, double 5.10, double 5.20, double 5.30, double 5.40, double 5.50, double 5.60, double 5.70, double 5.80, double 5.90, double 6.00, double 6.10, double 6.20, double 6.30, double 6.40, double 6.50, double 6.60, double 6.70, double 6.80, double 6.90, double 7.00, double 7.10, double 7.20, double 7.30, double 7.40, double 7.50, double 7.60, double 7.70, double 7.80, double 7.90, double 8.00, double 8.10, double 8.20, double 8.30, double 8.40, double 8.50, double 8.60, double 8.70, double 8.80, double 8.90, double 9.00, double 9.10, double 9.20, double 9.30, double 9.40, double 9.50, double 9.60, double 9.70, double 9.80, double 9.90], align 8


; Should generate flat load

; CHECK-LABEL: @generic_address_bitcast_const(
; CHECK: %vecload1 = load <2 x double>, ptr addrspace(1) getelementptr ([100 x double], ptr addrspace(1) @data, i64 0, i64 4), align 8
define amdgpu_kernel void @generic_address_bitcast_const(i64 %arg0, ptr addrspace(1) nocapture %results) #0 {
entry:
  %tmp1 = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = zext i32 %tmp1 to i64
  %tmp3 = add i64 %tmp2, %arg0
  ; Load through a constant-expression addrspacecast of @data; the pass should
  ; fold this back to a direct addrspace(1) access (see CHECK line above).
  %vecload1 = load <2 x double>, ptr bitcast (ptr getelementptr ([100 x double], ptr addrspacecast (ptr addrspace(1) @data to ptr), i64 0, i64 4) to ptr), align 8
  %cmp = fcmp ord <2 x double> %vecload1, zeroinitializer
  %sext = sext <2 x i1> %cmp to <2 x i64>
  %tmp4 = extractelement <2 x i64> %sext, i64 0
  %tmp5 = extractelement <2 x i64> %sext, i64 1
  %tmp6 = and i64 %tmp4, %tmp5
  %tmp7 = lshr i64 %tmp6, 63
  %tmp8 = trunc i64 %tmp7 to i32
  %idxprom = and i64 %tmp3, 4294967295
  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %results, i64 %idxprom
  store i32 %tmp8, ptr addrspace(1) %arrayidx, align 4
  ret void
}
@generic_address_bug9749.val = internal addrspace(1) global float 0.0, align 4

declare i32 @_Z9get_fencePv(ptr)
%opencl.pipe_t = type opaque
; This is a compile time assert bug, but we still want to check optimization
; is performed to generate ld_global.
; CHECK-LABEL: @generic_address_pipe_bug9673(
; CHECK: %add.ptr = getelementptr inbounds i32, ptr addrspace(3) %in_pipe, i32 2
; CHECK: %tmp2 = load i32, ptr addrspace(3) %add.ptr, align 4
define amdgpu_kernel void @generic_address_pipe_bug9673(ptr addrspace(3) nocapture %in_pipe, ptr addrspace(1) nocapture %dst) #0 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  ; Access stays in addrspace(3); the pass must not widen it to flat.
  %add.ptr = getelementptr inbounds i32, ptr addrspace(3) %in_pipe, i32 2
  %tmp2 = load i32, ptr addrspace(3) %add.ptr, align 4
  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %dst, i32 %tmp
  store i32 %tmp2, ptr addrspace(1) %arrayidx, align 4
  ret void
}

; Should generate flat load
; CHECK-LABEL: @generic_address_bug9749(
; CHECK: br i1
; CHECK: load float, ptr
; CHECK: br label
define amdgpu_kernel void @generic_address_bug9749(ptr addrspace(1) nocapture %results) #0 {
entry:
  %ptr = alloca ptr, align 8, addrspace(5)
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = zext i32 %tmp to i64
  store float 0x3FB99999A0000000, ptr addrspace(1) @generic_address_bug9749.val, align 4
  ; The generic pointer escapes through an opaque call (@_Z9get_fencePv),
  ; so the load of %tmp2 below must remain a flat (generic) load.
  store volatile ptr addrspacecast (ptr addrspace(1) @generic_address_bug9749.val to ptr), ptr addrspace(5) %ptr, align 8
  %tmp2 = load volatile ptr, ptr addrspace(5) %ptr, align 8
  %tmp3 = load float, ptr addrspace(1) @generic_address_bug9749.val, align 4
  %call.i = call i32 @_Z9get_fencePv(ptr %tmp2) #1
  %switch.i.i = icmp ult i32 %call.i, 4
  br i1 %switch.i.i, label %if.end.i, label %helperFunction.exit

if.end.i:                                         ; preds = %entry
  %tmp5 = load float, ptr %tmp2, align 4
  %not.cmp.i = fcmp oeq float %tmp5, %tmp3
  %phitmp = zext i1 %not.cmp.i to i32
  br label %helperFunction.exit

helperFunction.exit:                              ; preds = %if.end.i, %entry
  %retval.0.i = phi i32 [ 0, %entry ], [ %phitmp, %if.end.i ]
  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %results, i64 %tmp1
  store i32 %retval.0.i, ptr addrspace(1) %arrayidx, align 4
  ret void
}

; CHECK-LABEL: @generic_address_opt_phi_bug9776_simple_phi_kernel(
; CHECK: phi ptr addrspace(3)
; CHECK: store i32 %i.03, ptr addrspace(3) %
define amdgpu_kernel void @generic_address_opt_phi_bug9776_simple_phi_kernel(ptr addrspace(3) nocapture %in, i32 %numElems) #0 {
entry:
  %cmp1 = icmp eq i32 %numElems, 0
  br i1 %cmp1, label %for.end, label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  ; The addrspacecast feeding the pointer phi should be eliminated so the
  ; phi and store operate directly on addrspace(3) (see CHECK lines above).
  %tmp = addrspacecast ptr addrspace(3) %in to ptr
  br label %for.body

for.body:                                         ; preds = %for.body, %for.body.lr.ph
  %i.03 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %ptr.02 = phi ptr [ %tmp, %for.body.lr.ph ], [ %add.ptr, %for.body ]
  store i32 %i.03, ptr %ptr.02, align 4
  %add.ptr = getelementptr inbounds i32, ptr %ptr.02, i64 4
  %inc = add nuw i32 %i.03, 1
  %exitcond = icmp eq i32 %inc, %numElems
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK-LABEL: @generic_address_bug9899(
; CHECK: %vecload = load <2 x i32>, ptr addrspace(3)
; CHECK: store <2 x i32> %tmp16, ptr addrspace(3)
define amdgpu_kernel void @generic_address_bug9899(i64 %arg0, ptr addrspace(3) nocapture %sourceA, ptr addrspace(3) nocapture %destValues) #0 {
entry:
  %tmp1 = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = zext i32 %tmp1 to i64
  %tmp3 = add i64 %tmp2, %arg0
  %sext = shl i64 %tmp3, 32
  ; Both casts to generic should be folded so the vector load/store below
  ; are rewritten to operate directly on addrspace(3).
  %tmp4 = addrspacecast ptr addrspace(3) %destValues to ptr
  %tmp5 = addrspacecast ptr addrspace(3) %sourceA to ptr
  %tmp6 = ashr exact i64 %sext, 31
  %tmp7 = getelementptr inbounds i32, ptr %tmp5, i64 %tmp6
  %vecload = load <2 x i32>, ptr %tmp7, align 4
  %tmp8 = extractelement <2 x i32> %vecload, i32 0
  %tmp9 = extractelement <2 x i32> %vecload, i32 1
  %tmp10 = icmp eq i32 %tmp8, 0
  %tmp11 = select i1 %tmp10, i32 32, i32 %tmp8
  %tmp12 = icmp eq i32 %tmp9, 0
  %tmp13 = select i1 %tmp12, i32 32, i32 %tmp9
  %tmp14 = getelementptr inbounds i32, ptr %tmp4, i64 %tmp6
  %tmp15 = insertelement <2 x i32> undef, i32 %tmp11, i32 0
  %tmp16 = insertelement <2 x i32> %tmp15, i32 %tmp13, i32 1
  store <2 x i32> %tmp16, ptr %tmp14, align 4
  ret void
}
declare i32 @llvm.amdgcn.workitem.id.x() #2

attributes #0 = { nounwind }
attributes #1 = { nounwind readonly }
attributes #2 = { nounwind readnone }