; xref: /llvm-project/llvm/test/CodeGen/AMDGPU/mad_uint24.ll (revision 9e9907f1cfa424366fba58d9520f9305b537cec9)
; RUN: llc < %s -mtriple=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -mtriple=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -mtriple=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC --check-prefix=GCN
; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN2
; RUN: llc < %s -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN2

declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone

; FUNC-LABEL: {{^}}u32_mad24:
; EG: MULLO_INT
; SI: s_mul_i32
; SI: s_add_i32
; VI: s_mul_{{[iu]}}32
; VI: s_add_{{[iu]}}32

; Both multiply operands are masked to their low 24 bits via shl/lshr,
; so the mul+add pair is eligible for 24-bit mad selection.
define amdgpu_kernel void @u32_mad24(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
entry:
  %0 = shl i32 %a, 8
  %a_24 = lshr i32 %0, 8
  %1 = shl i32 %b, 8
  %b_24 = lshr i32 %1, 8
  %2 = mul i32 %a_24, %b_24
  %3 = add i32 %2, %c
  store i32 %3, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}i16_mad24:
; The order of A and B does not matter.
; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 16
; GCN:	s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
; GCN:	s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
; GCN:	s_sext_i32_i16 [[EXT:s[0-9]]], [[MAD]]
; GCN:	v_mov_b32_e32 v0, [[EXT]]
define amdgpu_kernel void @i16_mad24(ptr addrspace(1) %out, i16 %a, i16 %b, i16 %c) {
entry:
  %0 = mul i16 %a, %b
  %1 = add i16 %0, %c
  %2 = sext i16 %1 to i32
  store i32 %2, ptr addrspace(1) %out
  ret void
}

; FIXME: Need to handle non-uniform case for function below (load without gep).
; FUNC-LABEL: {{^}}i8_mad24:
; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 8
; GCN:	s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
; GCN:	s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
; GCN:	s_sext_i32_i8 [[EXT:s[0-9]]], [[MAD]]
; GCN:	v_mov_b32_e32 v0, [[EXT]]
define amdgpu_kernel void @i8_mad24(ptr addrspace(1) %out, i8 %a, i8 %b, i8 %c) {
entry:
  %0 = mul i8 %a, %b
  %1 = add i8 %0, %c
  %2 = sext i8 %1 to i32
  store i32 %2, ptr addrspace(1) %out
  ret void
}

; This tests for a bug where the mad_u24 pattern matcher would call
; SimplifyDemandedBits on the first operand of the mul instruction
; assuming that the pattern would be matched to a 24-bit mad.  This
; led to some instructions being incorrectly erased when the entire
; 24-bit mad pattern wasn't being matched.

; Check that the select instruction is not deleted.
; FUNC-LABEL: {{^}}i24_i32_i32_mad:
; EG: CNDE_INT
; SI: s_cselect
; GCN2: s_cselect
define amdgpu_kernel void @i24_i32_i32_mad(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c, i32 %d) {
entry:
  %0 = ashr i32 %a, 8
  %1 = icmp ne i32 %c, 0
  %2 = select i1 %1, i32 %0, i32 34
  %3 = mul i32 %2, %c
  %4 = add i32 %3, %d
  store i32 %4, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}extra_and:
; SI-NOT: v_and
; SI: s_mul_i32
; SI: s_mul_i32
; SI: s_add_i32
; SI: s_add_i32
define amdgpu_kernel void @extra_and(ptr addrspace(1) %arg, i32 %arg2, i32 %arg3) {
bb:
  br label %bb4

bb4:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp5 = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp6 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp7 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp8 = and i32 %tmp7, 16777215
  %tmp9 = and i32 %tmp6, 16777215
  %tmp10 = and i32 %tmp5, 16777215
  %tmp11 = and i32 %tmp, 16777215
  %tmp12 = mul i32 %tmp8, %tmp11
  %tmp13 = add i32 %arg2, %tmp12
  %tmp14 = mul i32 %tmp9, %tmp11
  %tmp15 = add i32 %arg3, %tmp14
  %tmp16 = add nuw nsw i32 %tmp13, %tmp15
  %tmp17 = icmp eq i32 %tmp16, 8
  br i1 %tmp17, label %bb18, label %bb4

bb18:                                             ; preds = %bb4
  store i32 %tmp16, ptr addrspace(1) %arg
  ret void
}

; FUNC-LABEL: {{^}}dont_remove_shift:
; SI: s_lshr
; SI: s_mul_i32
; SI: s_mul_i32
; SI: s_add_i32
; SI: s_add_i32
define amdgpu_kernel void @dont_remove_shift(ptr addrspace(1) %arg, i32 %arg2, i32 %arg3) {
bb:
  br label %bb4

bb4:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp5 = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp6 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp7 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp8 = lshr i32 %tmp7, 8
  %tmp9 = lshr i32 %tmp6, 8
  %tmp10 = lshr i32 %tmp5, 8
  %tmp11 = lshr i32 %tmp, 8
  %tmp12 = mul i32 %tmp8, %tmp11
  %tmp13 = add i32 %arg2, %tmp12
  %tmp14 = mul i32 %tmp9, %tmp11
  %tmp15 = add i32 %arg3, %tmp14
  %tmp16 = add nuw nsw i32 %tmp13, %tmp15
  %tmp17 = icmp eq i32 %tmp16, 8
  br i1 %tmp17, label %bb18, label %bb4

bb18:                                             ; preds = %bb4
  store i32 %tmp16, ptr addrspace(1) %arg
  ret void
}

; FUNC-LABEL: {{^}}i8_mad_sat_16:
; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 8
; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; SI: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
; SI: v_med3_i32 v{{[0-9]}}, [[EXT]],
; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_max_i16_e32 [[MAX:v[0-9]]], 0xff80, [[MAD]]
; VI: v_min_i16_e32 {{v[0-9]}}, 0x7f, [[MAX]]
define amdgpu_kernel void @i8_mad_sat_16(ptr addrspace(1) %out, ptr addrspace(1) %in0, ptr addrspace(1) %in1, ptr addrspace(1) %in2, ptr addrspace(5) %idx) {
entry:
  %retval.0.i = load i64, ptr addrspace(5) %idx
  %arrayidx = getelementptr inbounds i8, ptr addrspace(1) %in0, i64 %retval.0.i
  %arrayidx2 = getelementptr inbounds i8, ptr addrspace(1) %in1, i64 %retval.0.i
  %arrayidx4 = getelementptr inbounds i8, ptr addrspace(1) %in2, i64 %retval.0.i
  %l1 = load i8, ptr addrspace(1) %arrayidx, align 1
  %l2 = load i8, ptr addrspace(1) %arrayidx2, align 1
  %l3 = load i8, ptr addrspace(1) %arrayidx4, align 1
  %conv1.i = sext i8 %l1 to i16
  %conv3.i = sext i8 %l2 to i16
  %conv5.i = sext i8 %l3 to i16
  %mul.i.i.i = mul nsw i16 %conv3.i, %conv1.i
  %add.i.i = add i16 %mul.i.i.i, %conv5.i
  %c4 = icmp sgt i16 %add.i.i, -128
  %cond.i.i = select i1 %c4, i16 %add.i.i, i16 -128
  %c5 = icmp slt i16 %cond.i.i, 127
  %cond13.i.i = select i1 %c5, i16 %cond.i.i, i16 127
  %conv8.i = trunc i16 %cond13.i.i to i8
  %arrayidx7 = getelementptr inbounds i8, ptr addrspace(1) %out, i64 %retval.0.i
  store i8 %conv8.i, ptr addrspace(1) %arrayidx7, align 1
  ret void
}

; FUNC-LABEL: {{^}}i8_mad_32:
; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 8
; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
define amdgpu_kernel void @i8_mad_32(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(5) %idx) {
entry:
  %retval.0.i = load i64, ptr addrspace(5) %idx
  %arrayidx = getelementptr inbounds i8, ptr addrspace(1) %a, i64 %retval.0.i
  %arrayidx2 = getelementptr inbounds i8, ptr addrspace(1) %b, i64 %retval.0.i
  %arrayidx4 = getelementptr inbounds i8, ptr addrspace(1) %c, i64 %retval.0.i
  %la = load i8, ptr addrspace(1) %arrayidx, align 1
  %lb = load i8, ptr addrspace(1) %arrayidx2, align 1
  %lc = load i8, ptr addrspace(1) %arrayidx4, align 1
  %exta = sext i8 %la to i16
  %extb = sext i8 %lb to i16
  %extc = sext i8 %lc to i16
  %mul = mul i16 %exta, %extb
  %mad = add i16 %mul, %extc
  %mad_ext = sext i16 %mad to i32
  store i32 %mad_ext, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}i8_mad_64:
; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 8
; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
define amdgpu_kernel void @i8_mad_64(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(5) %idx) {
entry:
  %retval.0.i = load i64, ptr addrspace(5) %idx
  %arrayidx = getelementptr inbounds i8, ptr addrspace(1) %a, i64 %retval.0.i
  %arrayidx2 = getelementptr inbounds i8, ptr addrspace(1) %b, i64 %retval.0.i
  %arrayidx4 = getelementptr inbounds i8, ptr addrspace(1) %c, i64 %retval.0.i
  %la = load i8, ptr addrspace(1) %arrayidx, align 1
  %lb = load i8, ptr addrspace(1) %arrayidx2, align 1
  %lc = load i8, ptr addrspace(1) %arrayidx4, align 1
  %exta = sext i8 %la to i16
  %extb = sext i8 %lb to i16
  %extc = sext i8 %lc to i16
  %mul = mul i16 %exta, %extb
  %mad = add i16 %mul, %extc
  %mad_ext = sext i16 %mad to i64
  store i64 %mad_ext, ptr addrspace(1) %out
  ret void
}

; The ands are asserting the high bits are 0. SimplifyDemandedBits on
; the adds would remove the ands before the target combine on the mul
; had a chance to form mul24. The mul combine would then see
; extractelement with no known bits and fail. All of the mul/add
; combos in this loop should form v_mad_u32_u24.

; FUNC-LABEL: {{^}}mad24_known_bits_destroyed:
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
define void @mad24_known_bits_destroyed(i32 %arg, <4 x i32> %arg1, <4 x i32> %arg2, <4 x i32> %arg3, i32 %arg4, i32 %arg5, i32 %arg6, ptr addrspace(1) %arg7, ptr addrspace(1) %arg8) #0 {
bb:
  %tmp = and i32 %arg4, 16777215
  %tmp9 = extractelement <4 x i32> %arg1, i64 1
  %tmp10 = extractelement <4 x i32> %arg3, i64 1
  %tmp11 = and i32 %tmp9, 16777215
  %tmp12 = extractelement <4 x i32> %arg1, i64 2
  %tmp13 = extractelement <4 x i32> %arg3, i64 2
  %tmp14 = and i32 %tmp12, 16777215
  %tmp15 = extractelement <4 x i32> %arg1, i64 3
  %tmp16 = extractelement <4 x i32> %arg3, i64 3
  %tmp17 = and i32 %tmp15, 16777215
  br label %bb19

bb18:                                             ; preds = %bb19
  ret void

bb19:                                             ; preds = %bb19, %bb
  %tmp20 = phi i32 [ %arg, %bb ], [ %tmp40, %bb19 ]
  %tmp21 = phi i32 [ 0, %bb ], [ %tmp54, %bb19 ]
  %tmp22 = phi <4 x i32> [ %arg2, %bb ], [ %tmp53, %bb19 ]
  %tmp23 = and i32 %tmp20, 16777215
  %tmp24 = mul i32 %tmp23, %tmp
  %tmp25 = add i32 %tmp24, %arg5
  %tmp26 = extractelement <4 x i32> %tmp22, i64 1
  %tmp27 = and i32 %tmp26, 16777215
  %tmp28 = mul i32 %tmp27, %tmp11
  %tmp29 = add i32 %tmp28, %tmp10
  %tmp30 = extractelement <4 x i32> %tmp22, i64 2
  %tmp31 = and i32 %tmp30, 16777215
  %tmp32 = mul i32 %tmp31, %tmp14
  %tmp33 = add i32 %tmp32, %tmp13
  %tmp34 = extractelement <4 x i32> %tmp22, i64 3
  %tmp35 = and i32 %tmp34, 16777215
  %tmp36 = mul i32 %tmp35, %tmp17
  %tmp37 = add i32 %tmp36, %tmp16
  %tmp38 = and i32 %tmp25, 16777215
  %tmp39 = mul i32 %tmp38, %tmp
  %tmp40 = add i32 %tmp39, %arg5
  store i32 %tmp40, ptr addrspace(1) %arg7
  %tmp41 = insertelement <4 x i32> undef, i32 %tmp40, i32 0
  %tmp42 = and i32 %tmp29, 16777215
  %tmp43 = mul i32 %tmp42, %tmp11
  %tmp44 = add i32 %tmp43, %tmp10
  %tmp45 = insertelement <4 x i32> %tmp41, i32 %tmp44, i32 1
  %tmp46 = and i32 %tmp33, 16777215
  %tmp47 = mul i32 %tmp46, %tmp14
  %tmp48 = add i32 %tmp47, %tmp13
  %tmp49 = insertelement <4 x i32> %tmp45, i32 %tmp48, i32 2
  %tmp50 = and i32 %tmp37, 16777215
  %tmp51 = mul i32 %tmp50, %tmp17
  %tmp52 = add i32 %tmp51, %tmp16
  %tmp53 = insertelement <4 x i32> %tmp49, i32 %tmp52, i32 3
  store <4 x i32> %tmp53, ptr addrspace(1) %arg8
  %tmp54 = add nuw nsw i32 %tmp21, 1
  %tmp55 = icmp eq i32 %tmp54, %arg6
  br i1 %tmp55, label %bb18, label %bb19
}

attributes #0 = { norecurse nounwind }