; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-late-codegenprepare %s | FileCheck %s -check-prefix=GFX9
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -amdgpu-late-codegenprepare %s | FileCheck %s -check-prefix=GFX12
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=amdgpu-late-codegenprepare %s | FileCheck %s -check-prefix=GFX9

; Make sure we don't crash when trying to create a bitcast between
; address spaces.
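; The gfx900 checks expect the i8 load to be widened to a dword load whose
; value is extracted with lshr/trunc; the gfx1200 checks keep the original
; i8 load.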
define amdgpu_kernel void @constant_from_offset_cast_generic_null() {
; GFX9-LABEL: @constant_from_offset_cast_generic_null(
; GFX9-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_offset_cast_generic_null(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

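; Same rewrite, but the addrspacecast source is an addrspace(1) null pointer
; rather than a generic one.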
define amdgpu_kernel void @constant_from_offset_cast_global_null() {
; GFX9-LABEL: @constant_from_offset_cast_global_null(
; GFX9-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_offset_cast_global_null(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

@gv = unnamed_addr addrspace(1) global [64 x i8] undef, align 4

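; Same constant-offset load, based on the global variable @gv instead of a
; null pointer.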
define amdgpu_kernel void @constant_from_offset_cast_global_gv() {
; GFX9-LABEL: @constant_from_offset_cast_global_gv(
; GFX9-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_offset_cast_global_gv(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

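; The offset load goes through an addrspacecast of an inttoptr constant;
; gfx900 is still expected to widen it to a dword load.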
define amdgpu_kernel void @constant_from_offset_cast_generic_inttoptr() {
; GFX9-LABEL: @constant_from_offset_cast_generic_inttoptr(
; GFX9-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_offset_cast_generic_inttoptr(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

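; Direct load from an inttoptr constant in addrspace(4), with no offset to
; fold. No widening is expected; the gfx900 checks only raise the load's
; alignment from 1 to 4.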
define amdgpu_kernel void @constant_from_inttoptr() {
; GFX9-LABEL: @constant_from_inttoptr(
; GFX9-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 4
; GFX9-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_inttoptr(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

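; A cycle of phis over the illegal type <4 x i8>; the checks expect the pass
; to leave this IR unchanged.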
define void @broken_phi() {
; GFX9-LABEL: @broken_phi(
; GFX9-NEXT:  bb:
; GFX9-NEXT:    br label [[BB1:%.*]]
; GFX9:       bb1:
; GFX9-NEXT:    [[I:%.*]] = phi <4 x i8> [ splat (i8 1), [[BB:%.*]] ], [ [[I8:%.*]], [[BB7:%.*]] ]
; GFX9-NEXT:    br i1 false, label [[BB3:%.*]], label [[BB2:%.*]]
; GFX9:       bb2:
; GFX9-NEXT:    br label [[BB3]]
; GFX9:       bb3:
; GFX9-NEXT:    [[I4:%.*]] = phi <4 x i8> [ zeroinitializer, [[BB2]] ], [ [[I]], [[BB1]] ]
; GFX9-NEXT:    br i1 false, label [[BB7]], label [[BB5:%.*]]
; GFX9:       bb5:
; GFX9-NEXT:    [[I6:%.*]] = call <4 x i8> @llvm.smax.v4i8(<4 x i8> [[I4]], <4 x i8> zeroinitializer)
; GFX9-NEXT:    br label [[BB7]]
; GFX9:       bb7:
; GFX9-NEXT:    [[I8]] = phi <4 x i8> [ zeroinitializer, [[BB5]] ], [ zeroinitializer, [[BB3]] ]
; GFX9-NEXT:    br label [[BB1]]
;
; GFX12-LABEL: @broken_phi(
; GFX12-NEXT:  bb:
; GFX12-NEXT:    br label [[BB1:%.*]]
; GFX12:       bb1:
; GFX12-NEXT:    [[I:%.*]] = phi <4 x i8> [ splat (i8 1), [[BB:%.*]] ], [ [[I8:%.*]], [[BB7:%.*]] ]
; GFX12-NEXT:    br i1 false, label [[BB3:%.*]], label [[BB2:%.*]]
; GFX12:       bb2:
; GFX12-NEXT:    br label [[BB3]]
; GFX12:       bb3:
; GFX12-NEXT:    [[I4:%.*]] = phi <4 x i8> [ zeroinitializer, [[BB2]] ], [ [[I]], [[BB1]] ]
; GFX12-NEXT:    br i1 false, label [[BB7]], label [[BB5:%.*]]
; GFX12:       bb5:
; GFX12-NEXT:    [[I6:%.*]] = call <4 x i8> @llvm.smax.v4i8(<4 x i8> [[I4]], <4 x i8> zeroinitializer)
; GFX12-NEXT:    br label [[BB7]]
; GFX12:       bb7:
; GFX12-NEXT:    [[I8]] = phi <4 x i8> [ zeroinitializer, [[BB5]] ], [ zeroinitializer, [[BB3]] ]
; GFX12-NEXT:    br label [[BB1]]
;
bb:
  br label %bb1
bb1:
  %i = phi <4 x i8> [ <i8 1, i8 1, i8 1, i8 1>, %bb ], [ %i8, %bb7 ]
  br i1 false, label %bb3, label %bb2
bb2:
  br label %bb3
bb3:
  %i4 = phi <4 x i8> [ zeroinitializer, %bb2 ], [ %i, %bb1 ]
  br i1 false, label %bb7, label %bb5
bb5:
  %i6 = call <4 x i8> @llvm.smax.v4i8(<4 x i8> %i4, <4 x i8> zeroinitializer)
  br label %bb7
bb7:
  %i8 = phi <4 x i8> [ zeroinitializer, %bb5 ], [ zeroinitializer, %bb3 ]
  br label %bb1
}