xref: /llvm-project/llvm/test/CodeGen/AMDGPU/sched.barrier.inverted.mask.ll (revision 9e9907f1cfa424366fba58d9520f9305b537cec9)
1; REQUIRES: asserts
2
3; RUN: llc -mtriple=amdgcn < %s -debug-only=igrouplp 2>&1 | FileCheck --check-prefixes=GCN %s
4
5
6
7
8; Inverted 1008: 01111110000
9; GCN: After Inverting, SchedGroup Mask: 1008
10define amdgpu_kernel void @invert1() #0 {
11entry:
12  call void @llvm.amdgcn.sched.barrier(i32 1) #1
13  call void @llvm.amdcn.s.nop(i16 0) #1
14  ret void
15}
16
17; Inverted 2044: 11111111100
18; GCN:       After Inverting, SchedGroup Mask: 2044
19define amdgpu_kernel void @invert2() #0 {
20entry:
21  call void @llvm.amdgcn.sched.barrier(i32 2) #1
22  call void @llvm.amdcn.s.nop(i16 0) #1
23  ret void
24}
25
26; Inverted 2042: 11111111010
27; GCN:       After Inverting, SchedGroup Mask: 2042
28define amdgpu_kernel void @invert4() #0 {
29entry:
30  call void @llvm.amdgcn.sched.barrier(i32 4) #1
31  call void @llvm.amdcn.s.nop(i16 0) #1
32  ret void
33}
34
35; Inverted 2038: 11111110110
36; GCN:       After Inverting, SchedGroup Mask: 2038
37define amdgpu_kernel void @invert8() #0 {
38entry:
39  call void @llvm.amdgcn.sched.barrier(i32 8) #1
40  call void @llvm.amdcn.s.nop(i16 0) #1
41  ret void
42}
43
44; Inverted 1935: 11110001111
45; GCN:       After Inverting, SchedGroup Mask: 1935
46define amdgpu_kernel void @invert16() #0 {
47entry:
48  call void @llvm.amdgcn.sched.barrier(i32 16) #1
49  call void @llvm.amdcn.s.nop(i16 0) #1
50  ret void
51}
52
53; Inverted 1999: 11111001111
54; GCN:       After Inverting, SchedGroup Mask: 1999
55define amdgpu_kernel void @invert32() #0 {
56entry:
57  call void @llvm.amdgcn.sched.barrier(i32 32) #1
58  call void @llvm.amdcn.s.nop(i16 0) #1
59  ret void
60}
61
62; Inverted 1967: 11110101111
63; GCN:       After Inverting, SchedGroup Mask: 1967
64define amdgpu_kernel void @invert64() #0 {
65entry:
66  call void @llvm.amdgcn.sched.barrier(i32 64) #1
67  call void @llvm.amdcn.s.nop(i16 0) #1
68  ret void
69}
70
71; Inverted 1151: 10001111111
72; GCN:       After Inverting, SchedGroup Mask: 1151
73define amdgpu_kernel void @invert128() #0 {
74entry:
75  call void @llvm.amdgcn.sched.barrier(i32 128) #1
76  call void @llvm.amdcn.s.nop(i16 0) #1
77  ret void
78}
79
80; Inverted 1663: 11001111111
81; GCN:       After Inverting, SchedGroup Mask: 1663
82define amdgpu_kernel void @invert256() #0 {
83entry:
84  call void @llvm.amdgcn.sched.barrier(i32 256) #1
85  call void @llvm.amdcn.s.nop(i16 0) #1
86  ret void
87}
88
89; Inverted 1407: 10101111111
90; GCN:       After Inverting, SchedGroup Mask: 1407
91define amdgpu_kernel void @invert512() #0 {
92entry:
93  call void @llvm.amdgcn.sched.barrier(i32 512) #1
94  call void @llvm.amdcn.s.nop(i16 0) #1
95  ret void
96}
97
98; Inverted 1022: 01111111110
99; GCN:       After Inverting, SchedGroup Mask: 1022
100define amdgpu_kernel void @invert1024() #0 {
101entry:
102  call void @llvm.amdgcn.sched.barrier(i32 1024) #1
103  call void @llvm.amdcn.s.nop(i16 0) #1
104  ret void
105}
106
107declare void @llvm.amdgcn.sched.barrier(i32) #1
108declare void @llvm.amdcn.s.nop(i16) #1
109
110attributes #0 = { nounwind }
111attributes #1 = { convergent nounwind }
112