; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn -mcpu=hawaii -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; NOTE: llvm.amdgcn.wwm is deprecated, use llvm.amdgcn.strict.wwm instead.
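; Both the deprecated intrinsic and its llvm.amdgcn.strict.wwm replacement are
; expected to select to the same STRICT_WWM pseudo-instruction, as the
; autogenerated checks below show. Migration is purely a rename of the callee,
; e.g.
;   %ret = call float @llvm.amdgcn.wwm.f32(float %val)
; becomes
;   %ret = call float @llvm.amdgcn.strict.wwm.f32(float %val)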

define amdgpu_ps float @wwm_f32(float %val) {
  ; GCN-LABEL: name: wwm_f32
  ; GCN: bb.1 (%ir-block.0):
  ; GCN-NEXT:   liveins: $vgpr0
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN-NEXT:   [[STRICT_WWM:%[0-9]+]]:vgpr_32 = STRICT_WWM [[COPY]], implicit $exec
  ; GCN-NEXT:   $vgpr0 = COPY [[STRICT_WWM]]
  ; GCN-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
  %ret = call float @llvm.amdgcn.wwm.f32(float %val)
  ret float %ret
}

define amdgpu_ps float @wwm_v2f16(float %arg) {
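  ; The bitcasts pack the <2 x half> value into a single 32-bit VGPR, so
  ; selection matches the f32 case above.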
  ; GCN-LABEL: name: wwm_v2f16
  ; GCN: bb.1 (%ir-block.0):
  ; GCN-NEXT:   liveins: $vgpr0
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN-NEXT:   [[STRICT_WWM:%[0-9]+]]:vgpr_32 = STRICT_WWM [[COPY]], implicit $exec
  ; GCN-NEXT:   $vgpr0 = COPY [[STRICT_WWM]]
  ; GCN-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = bitcast float %arg to <2 x half>
  %ret = call <2 x half> @llvm.amdgcn.wwm.v2f16(<2 x half> %val)
  %bc = bitcast <2 x half> %ret to float
  ret float %bc
}

define amdgpu_ps <2 x float> @wwm_f64(double %val) {
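  ; The 64-bit value arrives split across $vgpr0/$vgpr1 and is reassembled with
  ; REG_SEQUENCE so that STRICT_WWM operates on a single vreg_64.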
  ; GCN-LABEL: name: wwm_f64
  ; GCN: bb.1 (%ir-block.0):
  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; GCN-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; GCN-NEXT:   [[STRICT_WWM:%[0-9]+]]:vreg_64 = STRICT_WWM [[REG_SEQUENCE]], implicit $exec
  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[STRICT_WWM]].sub0
  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[STRICT_WWM]].sub1
  ; GCN-NEXT:   $vgpr0 = COPY [[COPY2]]
  ; GCN-NEXT:   $vgpr1 = COPY [[COPY3]]
  ; GCN-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
  %ret = call double @llvm.amdgcn.wwm.f64(double %val)
  %bitcast = bitcast double %ret to <2 x float>
  ret <2 x float> %bitcast
}

; TODO
; define amdgpu_ps float @wwm_i1_vcc(float %val) {
;   %vcc = fcmp oeq float %val, 0.0
;   %ret = call i1 @llvm.amdgcn.wwm.i1(i1 %vcc)
;   %select = select i1 %ret, float 1.0, float 0.0
;   ret float %select
; }
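; The i1 case is left as a TODO, presumably because i1 values on AMDGPU are
; lane masks (vcc) held in SGPRs and need dedicated handling under whole wave
; mode rather than the plain VGPR selection exercised above.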

define amdgpu_ps <3 x float> @wwm_v3f32(<3 x float> %val) {
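  ; Three incoming VGPRs are merged into a vreg_96 so WWM applies to the whole
  ; vector at once.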
  ; GCN-LABEL: name: wwm_v3f32
  ; GCN: bb.1 (%ir-block.0):
  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; GCN-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
  ; GCN-NEXT:   [[STRICT_WWM:%[0-9]+]]:vreg_96 = STRICT_WWM [[REG_SEQUENCE]], implicit $exec
  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[STRICT_WWM]].sub0
  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[STRICT_WWM]].sub1
  ; GCN-NEXT:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[STRICT_WWM]].sub2
  ; GCN-NEXT:   $vgpr0 = COPY [[COPY3]]
  ; GCN-NEXT:   $vgpr1 = COPY [[COPY4]]
  ; GCN-NEXT:   $vgpr2 = COPY [[COPY5]]
  ; GCN-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %ret = call <3 x float> @llvm.amdgcn.wwm.v3f32(<3 x float> %val)
  ret <3 x float> %ret
}

define amdgpu_ps float @strict_wwm_f32(float %val) {
  ; GCN-LABEL: name: strict_wwm_f32
  ; GCN: bb.1 (%ir-block.0):
  ; GCN-NEXT:   liveins: $vgpr0
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN-NEXT:   [[STRICT_WWM:%[0-9]+]]:vgpr_32 = STRICT_WWM [[COPY]], implicit $exec
  ; GCN-NEXT:   $vgpr0 = COPY [[STRICT_WWM]]
  ; GCN-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
  %ret = call float @llvm.amdgcn.strict.wwm.f32(float %val)
  ret float %ret
}

define amdgpu_ps float @strict_wwm_v2f16(float %arg) {
  ; GCN-LABEL: name: strict_wwm_v2f16
  ; GCN: bb.1 (%ir-block.0):
  ; GCN-NEXT:   liveins: $vgpr0
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN-NEXT:   [[STRICT_WWM:%[0-9]+]]:vgpr_32 = STRICT_WWM [[COPY]], implicit $exec
  ; GCN-NEXT:   $vgpr0 = COPY [[STRICT_WWM]]
  ; GCN-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = bitcast float %arg to <2 x half>
  %ret = call <2 x half> @llvm.amdgcn.strict.wwm.v2f16(<2 x half> %val)
  %bc = bitcast <2 x half> %ret to float
  ret float %bc
}

define amdgpu_ps <2 x float> @strict_wwm_f64(double %val) {
  ; GCN-LABEL: name: strict_wwm_f64
  ; GCN: bb.1 (%ir-block.0):
  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; GCN-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; GCN-NEXT:   [[STRICT_WWM:%[0-9]+]]:vreg_64 = STRICT_WWM [[REG_SEQUENCE]], implicit $exec
  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[STRICT_WWM]].sub0
  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[STRICT_WWM]].sub1
  ; GCN-NEXT:   $vgpr0 = COPY [[COPY2]]
  ; GCN-NEXT:   $vgpr1 = COPY [[COPY3]]
  ; GCN-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
  %ret = call double @llvm.amdgcn.strict.wwm.f64(double %val)
  %bitcast = bitcast double %ret to <2 x float>
  ret <2 x float> %bitcast
}

; TODO
; define amdgpu_ps float @strict_wwm_i1_vcc(float %val) {
;   %vcc = fcmp oeq float %val, 0.0
;   %ret = call i1 @llvm.amdgcn.strict.wwm.i1(i1 %vcc)
;   %select = select i1 %ret, float 1.0, float 0.0
;   ret float %select
; }
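; The same i1 (lane mask) limitation noted above applies to the strict variant.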

define amdgpu_ps <3 x float> @strict_wwm_v3f32(<3 x float> %val) {
  ; GCN-LABEL: name: strict_wwm_v3f32
  ; GCN: bb.1 (%ir-block.0):
  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; GCN-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
  ; GCN-NEXT:   [[STRICT_WWM:%[0-9]+]]:vreg_96 = STRICT_WWM [[REG_SEQUENCE]], implicit $exec
  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[STRICT_WWM]].sub0
  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[STRICT_WWM]].sub1
  ; GCN-NEXT:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[STRICT_WWM]].sub2
  ; GCN-NEXT:   $vgpr0 = COPY [[COPY3]]
  ; GCN-NEXT:   $vgpr1 = COPY [[COPY4]]
  ; GCN-NEXT:   $vgpr2 = COPY [[COPY5]]
  ; GCN-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %ret = call <3 x float> @llvm.amdgcn.strict.wwm.v3f32(<3 x float> %val)
  ret <3 x float> %ret
}

declare i1 @llvm.amdgcn.wwm.i1(i1) #0
declare float @llvm.amdgcn.wwm.f32(float) #0
declare <2 x half> @llvm.amdgcn.wwm.v2f16(<2 x half>) #0
declare <3 x float> @llvm.amdgcn.wwm.v3f32(<3 x float>) #0
declare double @llvm.amdgcn.wwm.f64(double) #0
declare i1 @llvm.amdgcn.strict.wwm.i1(i1) #0
declare float @llvm.amdgcn.strict.wwm.f32(float) #0
declare <2 x half> @llvm.amdgcn.strict.wwm.v2f16(<2 x half>) #0
declare <3 x float> @llvm.amdgcn.strict.wwm.v3f32(<3 x float>) #0
declare double @llvm.amdgcn.strict.wwm.f64(double) #0

attributes #0 = { nounwind readnone speculatable }