xref: /llvm-project/llvm/test/Transforms/StructurizeCFG/AMDGPU/loop-subregion-misordered.ll (revision 54d31bde324523d946fd87f5c5d5e271826209d6)
1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -structurizecfg %s | FileCheck %s
3;
4; StructurizeCFG::orderNodes basically uses a reverse post-order (RPO) traversal of the region
5; list to get the order. The only problem with it is that sometimes backedges
6; for outer loops will be visited before backedges for inner loops. To solve this problem,
7; a loop-depth-based approach has been used to make sure all blocks in this loop have been visited
8; before moving on to the outer loop.
9;
10; However, we found a problem for a SubRegion which is a loop itself:
11;                   _
12;                  | |
13;                  V |
14;      --> BB1 --> BB2 --> BB3 -->
15;
16; In this case, BB2 is a SubRegion (loop), and thus its loop depth is different from that of
17; BB1 and BB3. This fact will lead BB2 to be placed in the wrong order.
18;
19; In this work, we treat the SubRegion as a special case and use its exit block to determine
20; the loop and its depth to guard the sorting.
21define amdgpu_kernel void @loop_subregion_misordered(ptr addrspace(1) %arg0) #0 {
22; CHECK-LABEL: @loop_subregion_misordered(
23; CHECK-NEXT:  entry:
24; CHECK-NEXT:    [[TMP:%.*]] = load volatile <2 x i32>, ptr addrspace(1) undef, align 16
25; CHECK-NEXT:    [[LOAD1:%.*]] = load volatile <2 x float>, ptr addrspace(1) undef, align 8
26; CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
27; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG0:%.*]], i32 [[TID]]
28; CHECK-NEXT:    [[I_INITIAL:%.*]] = load volatile i32, ptr addrspace(1) [[GEP]], align 4
29; CHECK-NEXT:    br label [[LOOP_HEADER:%.*]]
30; CHECK:       LOOP.HEADER:
31; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[I_INITIAL]], [[ENTRY:%.*]] ], [ [[TMP3:%.*]], [[FLOW3:%.*]] ]
32; CHECK-NEXT:    call void asm sideeffect "s_nop 0x100b
33; CHECK-NEXT:    [[TMP12:%.*]] = zext i32 [[I]] to i64
34; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) null, i64 [[TMP12]]
35; CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i32>, ptr addrspace(1) [[TMP13]], align 16
36; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <4 x i32> [[TMP14]], i64 0
37; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 65535
38; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 1
39; CHECK-NEXT:    br i1 [[TMP17]], label [[BB62:%.*]], label [[FLOW:%.*]]
40; CHECK:       Flow1:
41; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ [[INC_I:%.*]], [[INCREMENT_I:%.*]] ], [ undef, [[BB62]] ]
42; CHECK-NEXT:    [[TMP1:%.*]] = phi i1 [ false, [[INCREMENT_I]] ], [ true, [[BB62]] ]
43; CHECK-NEXT:    [[TMP2:%.*]] = phi i1 [ true, [[INCREMENT_I]] ], [ false, [[BB62]] ]
44; CHECK-NEXT:    br label [[FLOW]]
45; CHECK:       bb18:
46; CHECK-NEXT:    [[TMP19:%.*]] = extractelement <2 x i32> [[TMP]], i64 0
47; CHECK-NEXT:    [[TMP22:%.*]] = lshr i32 [[TMP19]], 16
48; CHECK-NEXT:    [[TMP24:%.*]] = urem i32 [[TMP22]], 52
49; CHECK-NEXT:    [[TMP25:%.*]] = mul nuw nsw i32 [[TMP24]], 52
50; CHECK-NEXT:    br label [[INNER_LOOP:%.*]]
51; CHECK:       Flow2:
52; CHECK-NEXT:    [[TMP3]] = phi i32 [ [[TMP59:%.*]], [[INNER_LOOP_BREAK:%.*]] ], [ [[TMP6:%.*]], [[FLOW]] ]
53; CHECK-NEXT:    [[TMP4:%.*]] = phi i1 [ true, [[INNER_LOOP_BREAK]] ], [ [[TMP8:%.*]], [[FLOW]] ]
54; CHECK-NEXT:    br i1 [[TMP4]], label [[END_ELSE_BLOCK:%.*]], label [[FLOW3]]
55; CHECK:       INNER_LOOP:
56; CHECK-NEXT:    [[INNER_LOOP_J:%.*]] = phi i32 [ [[INNER_LOOP_J_INC:%.*]], [[INNER_LOOP]] ], [ [[TMP25]], [[BB18:%.*]] ]
57; CHECK-NEXT:    call void asm sideeffect "
58; CHECK-NEXT:    [[INNER_LOOP_J_INC]] = add nsw i32 [[INNER_LOOP_J]], 1
59; CHECK-NEXT:    [[INNER_LOOP_CMP:%.*]] = icmp eq i32 [[INNER_LOOP_J]], 0
60; CHECK-NEXT:    br i1 [[INNER_LOOP_CMP]], label [[INNER_LOOP_BREAK]], label [[INNER_LOOP]]
61; CHECK:       INNER_LOOP_BREAK:
62; CHECK-NEXT:    [[TMP59]] = extractelement <4 x i32> [[TMP14]], i64 2
63; CHECK-NEXT:    call void asm sideeffect "s_nop 23 ", "~{memory}"() #[[ATTR0:[0-9]+]]
64; CHECK-NEXT:    br label [[FLOW2:%.*]]
65; CHECK:       bb62:
66; CHECK-NEXT:    [[LOAD13:%.*]] = icmp uge i32 [[TMP16]], 271
67; CHECK-NEXT:    br i1 [[LOAD13]], label [[INCREMENT_I]], label [[FLOW1:%.*]]
68; CHECK:       Flow3:
69; CHECK-NEXT:    [[TMP5:%.*]] = phi i1 [ [[CMP_END_ELSE_BLOCK:%.*]], [[END_ELSE_BLOCK]] ], [ true, [[FLOW2]] ]
70; CHECK-NEXT:    br i1 [[TMP5]], label [[FLOW4:%.*]], label [[LOOP_HEADER]]
71; CHECK:       Flow4:
72; CHECK-NEXT:    br i1 [[TMP7:%.*]], label [[BB64:%.*]], label [[RETURN:%.*]]
73; CHECK:       bb64:
74; CHECK-NEXT:    call void asm sideeffect "s_nop 42", "~{memory}"() #[[ATTR0]]
75; CHECK-NEXT:    br label [[RETURN]]
76; CHECK:       Flow:
77; CHECK-NEXT:    [[TMP6]] = phi i32 [ [[TMP0]], [[FLOW1]] ], [ undef, [[LOOP_HEADER]] ]
78; CHECK-NEXT:    [[TMP7]] = phi i1 [ [[TMP1]], [[FLOW1]] ], [ false, [[LOOP_HEADER]] ]
79; CHECK-NEXT:    [[TMP8]] = phi i1 [ [[TMP2]], [[FLOW1]] ], [ false, [[LOOP_HEADER]] ]
80; CHECK-NEXT:    [[TMP9:%.*]] = phi i1 [ false, [[FLOW1]] ], [ true, [[LOOP_HEADER]] ]
81; CHECK-NEXT:    br i1 [[TMP9]], label [[BB18]], label [[FLOW2]]
82; CHECK:       INCREMENT_I:
83; CHECK-NEXT:    [[INC_I]] = add i32 [[I]], 1
84; CHECK-NEXT:    call void asm sideeffect "s_nop 0x1336
85; CHECK-NEXT:    br label [[FLOW1]]
86; CHECK:       END_ELSE_BLOCK:
87; CHECK-NEXT:    call void asm sideeffect "s_nop 0x1337
88; CHECK-NEXT:    [[CMP_END_ELSE_BLOCK]] = icmp eq i32 [[TMP3]], -1
89; CHECK-NEXT:    br label [[FLOW3]]
90; CHECK:       RETURN:
91; CHECK-NEXT:    call void asm sideeffect "s_nop 0x99
92; CHECK-NEXT:    store volatile <2 x float> [[LOAD1]], ptr addrspace(1) undef, align 8
93; CHECK-NEXT:    ret void
94;
95entry:
96  %tmp = load volatile <2 x i32>, ptr addrspace(1) undef, align 16
97  %load1 = load volatile <2 x float>, ptr addrspace(1) undef
98  %tid = call i32 @llvm.amdgcn.workitem.id.x()
99  %gep = getelementptr inbounds i32, ptr addrspace(1) %arg0, i32 %tid
100  %i.initial = load volatile i32, ptr addrspace(1) %gep, align 4
101  br label %LOOP.HEADER
102
; Outer loop header (the BB1/BB3 level in the diagram above). %i comes from
; entry on the first trip and from END_ELSE_BLOCK on the back-edge; the low
; 16 bits of the loaded element pick the inner-loop path (bb18) vs. bb62.
103LOOP.HEADER:
104  %i = phi i32 [ %i.final, %END_ELSE_BLOCK ], [ %i.initial, %entry ]
105  call void asm sideeffect "s_nop 0x100b ; loop $0 ", "r,~{memory}"(i32 %i) #0
106  %tmp12 = zext i32 %i to i64
107  %tmp13 = getelementptr inbounds <4 x i32>, ptr addrspace(1) null, i64 %tmp12
108  %tmp14 = load <4 x i32>, ptr addrspace(1) %tmp13, align 16
109  %tmp15 = extractelement <4 x i32> %tmp14, i64 0
110  %tmp16 = and i32 %tmp15, 65535
111  %tmp17 = icmp eq i32 %tmp16, 1
112  br i1 %tmp17, label %bb18, label %bb62
113
; Reached when (%tmp15 & 0xffff) == 1; computes the inner loop's starting %tmp25.
114bb18:
115  %tmp19 = extractelement <2 x i32> %tmp, i64 0
116  %tmp22 = lshr i32 %tmp19, 16
117  %tmp24 = urem i32 %tmp22, 52
118  %tmp25 = mul nuw nsw i32 %tmp24, 52
119  br label %INNER_LOOP
120
; Single-block self-loop — the SubRegion loop (BB2 in the diagram above)
; whose ordering relative to the outer loop this test exercises.
121INNER_LOOP:
122  %inner.loop.j = phi i32 [ %tmp25, %bb18 ], [ %inner.loop.j.inc, %INNER_LOOP ]
123  call void asm sideeffect "; inner loop body", ""() #0
124  %inner.loop.j.inc = add nsw i32 %inner.loop.j, 1
125  %inner.loop.cmp = icmp eq i32 %inner.loop.j, 0
126  br i1 %inner.loop.cmp, label %INNER_LOOP_BREAK, label %INNER_LOOP
127
; Inner-loop exit: %tmp59 becomes the next outer-loop counter via END_ELSE_BLOCK.
128INNER_LOOP_BREAK:
129  %tmp59 = extractelement <4 x i32> %tmp14, i64 2
130  call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
131  br label %END_ELSE_BLOCK
132
; Reached when (%tmp15 & 0xffff) != 1.
133bb62:
134  %load13 = icmp ult i32 %tmp16, 271
135  br i1 %load13, label %bb64, label %INCREMENT_I
136
; Early exit from the outer loop straight to RETURN.
137bb64:
138  call void asm sideeffect "s_nop 42", "~{memory}"() #0
139  br label %RETURN
140
; Alternate latch path: advances the outer counter by one.
141INCREMENT_I:
142  %inc.i = add i32 %i, 1
143  call void asm sideeffect "s_nop 0x1336 ; increment $0", "v,~{memory}"(i32 %inc.i) #0
144  br label %END_ELSE_BLOCK
145
; Outer-loop latch: merges both counter updates; %i.final == -1 leaves the loop.
146END_ELSE_BLOCK:
147  %i.final = phi i32 [ %tmp59, %INNER_LOOP_BREAK ], [ %inc.i, %INCREMENT_I ]
148  call void asm sideeffect "s_nop 0x1337 ; end else block $0", "v,~{memory}"(i32 %i.final) #0
149  %cmp.end.else.block = icmp eq i32 %i.final, -1
150  br i1 %cmp.end.else.block, label %RETURN, label %LOOP.HEADER
151
; Common exit.
152RETURN:
153  call void asm sideeffect "s_nop 0x99 ; ClosureEval return", "~{memory}"() #0
154  store volatile <2 x float> %load1, ptr addrspace(1) undef, align 8
155  ret void
156}
157
158declare i32 @llvm.amdgcn.workitem.id.x() #1
159
160attributes #0 = { convergent nounwind }
161attributes #1 = { convergent nounwind readnone }
162