; RUN: llc -mtriple=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global,-xnack -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s

declare i32 @llvm.amdgcn.workitem.id.x() #0

; FUNC-LABEL: {{^}}test2:
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}

define amdgpu_kernel void @test2(ptr addrspace(1) %out, ptr addrspace(1) %in) {
  %b_ptr = getelementptr <2 x i32>, ptr addrspace(1) %in, i32 1
  %a = load <2 x i32>, ptr addrspace(1) %in
  %b = load <2 x i32>, ptr addrspace(1) %b_ptr
  %result = and <2 x i32> %a, %b
  store <2 x i32> %result, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}test4:
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}

define amdgpu_kernel void @test4(ptr addrspace(1) %out, ptr addrspace(1) %in) {
  %b_ptr = getelementptr <4 x i32>, ptr addrspace(1) %in, i32 1
  %a = load <4 x i32>, ptr addrspace(1) %in
  %b = load <4 x i32>, ptr addrspace(1) %b_ptr
  %result = and <4 x i32> %a, %b
  store <4 x i32> %result, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_i32:
; SI: s_and_b32
define amdgpu_kernel void @s_and_i32(ptr addrspace(1) %out, i32 %a, i32 %b) {
  %and = and i32 %a, %b
  store i32 %and, ptr addrspace(1) %out, align 4
  ret void
}

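; 1234567 = 0x12d687, which is too wide for a SALU inline immediate, so the
; check below expects it to be emitted as a literal constant operand.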
; FUNC-LABEL: {{^}}s_and_constant_i32:
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687
define amdgpu_kernel void @s_and_constant_i32(ptr addrspace(1) %out, i32 %a) {
  %and = and i32 %a, 1234567
  store i32 %and, ptr addrspace(1) %out, align 4
  ret void
}

; FIXME: We should really duplicate the constant so that the SALU use
; can fold into the s_and_b32 and the VALU one is materialized
; directly without copying from the SGPR.

; Second use is a VGPR use of the constant.
; FUNC-LABEL: {{^}}s_and_multi_use_constant_i32_0:
; SI-DAG: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, 0x12d687
; SI-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x12d687
; SI: buffer_store_dword [[VK]]
define amdgpu_kernel void @s_and_multi_use_constant_i32_0(ptr addrspace(1) %out, i32 %a, i32 %b) {
  %and = and i32 %a, 1234567

  ; Just to stop future replacement of copy to vgpr + store with VALU op.
  %foo = add i32 %and, %b
  store volatile i32 %foo, ptr addrspace(1) %out
  store volatile i32 1234567, ptr addrspace(1) %out
  ret void
}

; Second use is another SGPR use of the constant.
; FUNC-LABEL: {{^}}s_and_multi_use_constant_i32_1:
; SI: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, 0x12d687
; SI: s_add_i32
; SI: s_add_i32 [[ADD:s[0-9]+]], s{{[0-9]+}}, 0x12d687
; SI: v_mov_b32_e32 [[VADD:v[0-9]+]], [[ADD]]
; SI: buffer_store_dword [[VADD]]
define amdgpu_kernel void @s_and_multi_use_constant_i32_1(ptr addrspace(1) %out, i32 %a, i32 %b) {
  %and = and i32 %a, 1234567
  %foo = add i32 %and, 1234567
  %bar = add i32 %foo, %b
  store volatile i32 %bar, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_vgpr_vgpr:
; SI: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_and_i32_vgpr_vgpr(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %gep.a = getelementptr i32, ptr addrspace(1) %aptr, i32 %tid
  %gep.b = getelementptr i32, ptr addrspace(1) %bptr, i32 %tid
  %gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
  %a = load i32, ptr addrspace(1) %gep.a
  %b = load i32, ptr addrspace(1) %gep.b
  %and = and i32 %a, %b
  store i32 %and, ptr addrspace(1) %gep.out
  ret void
}

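; In the VOP2 encoding only src0 may be an SGPR (src1 must be a VGPR), so the
; two tests below expect the scalar operand [[SA]] in the first source position
; regardless of which IR operand it came from.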
; FUNC-LABEL: {{^}}v_and_i32_sgpr_vgpr:
; SI-DAG: s_load_dword [[SA:s[0-9]+]]
; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
define amdgpu_kernel void @v_and_i32_sgpr_vgpr(ptr addrspace(1) %out, i32 %a, ptr addrspace(1) %bptr) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %gep.b = getelementptr i32, ptr addrspace(1) %bptr, i32 %tid
  %gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
  %b = load i32, ptr addrspace(1) %gep.b
  %and = and i32 %a, %b
  store i32 %and, ptr addrspace(1) %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_vgpr_sgpr:
; SI-DAG: s_load_dword [[SA:s[0-9]+]]
; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
define amdgpu_kernel void @v_and_i32_vgpr_sgpr(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i32 %b) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %gep.a = getelementptr i32, ptr addrspace(1) %aptr, i32 %tid
  %gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
  %a = load i32, ptr addrspace(1) %gep.a
  %and = and i32 %a, %b
  store i32 %and, ptr addrspace(1) %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_constant_i32
; SI: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, v{{[0-9]+}}
define amdgpu_kernel void @v_and_constant_i32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %gep = getelementptr i32, ptr addrspace(1) %aptr, i32 %tid
  %a = load i32, ptr addrspace(1) %gep, align 4
  %and = and i32 %a, 1234567
  store i32 %and, ptr addrspace(1) %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_64_i32
; SI: v_and_b32_e32 v{{[0-9]+}}, 64, v{{[0-9]+}}
define amdgpu_kernel void @v_and_inline_imm_64_i32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %gep = getelementptr i32, ptr addrspace(1) %aptr, i32 %tid
  %a = load i32, ptr addrspace(1) %gep, align 4
  %and = and i32 %a, 64
  store i32 %and, ptr addrspace(1) %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_neg_16_i32
; SI: v_and_b32_e32 v{{[0-9]+}}, -16, v{{[0-9]+}}
define amdgpu_kernel void @v_and_inline_imm_neg_16_i32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %gep = getelementptr i32, ptr addrspace(1) %aptr, i32 %tid
  %a = load i32, ptr addrspace(1) %gep, align 4
  %and = and i32 %a, -16
  store i32 %and, ptr addrspace(1) %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_i64
; SI: s_and_b64
define amdgpu_kernel void @s_and_i64(ptr addrspace(1) %out, i64 %a, i64 %b) {
  %and = and i64 %a, %b
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

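; The two i1 arguments arrive packed in a single kernel-argument dword, so the
; checks below expect %b to be extracted with a shift by 8 before the AND, and
; the result to be truncated back to 1 bit before the byte store.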
; FUNC-LABEL: {{^}}s_and_i1:
; SI: s_load_dword [[LOAD:s[0-9]+]]
; SI: s_lshr_b32 [[B_SHIFT:s[0-9]+]], [[LOAD]], 8
; SI: s_and_b32 [[AND:s[0-9]+]], [[LOAD]], [[B_SHIFT]]
; SI: s_and_b32 [[AND_TRUNC:s[0-9]+]], [[AND]], 1{{$}}
; SI: v_mov_b32_e32 [[V_AND_TRUNC:v[0-9]+]], [[AND_TRUNC]]
; SI: buffer_store_byte [[V_AND_TRUNC]]
define amdgpu_kernel void @s_and_i1(ptr addrspace(1) %out, i1 %a, i1 %b) {
  %and = and i1 %a, %b
  store i1 %and, ptr addrspace(1) %out
  ret void
}

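; 549756338176 = 0x0000008000080000: neither half of the mask is an inline
; immediate, so the 64-bit AND is expected to split into two s_and_b32 with
; the literals 0x80000 (low dword) and 0x80 (high dword).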
; FUNC-LABEL: {{^}}s_and_constant_i64:
; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000{{$}}
; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80{{$}}
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_constant_i64(ptr addrspace(1) %out, i64 %a) {
  %and = and i64 %a, 549756338176
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

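; The XSI lines below are not enabled (no XSI prefix is passed to FileCheck);
; they appear to document the s_and_b64-with-materialized-constant codegen that
; would be preferable when the constant has multiple uses.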
; FUNC-LABEL: {{^}}s_and_multi_use_constant_i64:
; XSI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0x80000{{$}}
; XSI-DAG: s_mov_b32 s[[KHI:[0-9]+]], 0x80{{$}}
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s[[[KLO]]:[[KHI]]]
define amdgpu_kernel void @s_and_multi_use_constant_i64(ptr addrspace(1) %out, i64 %a, i64 %b) {
  %and0 = and i64 %a, 549756338176
  %and1 = and i64 %b, 549756338176
  store volatile i64 %and0, ptr addrspace(1) %out
  store volatile i64 %and1, ptr addrspace(1) %out
  ret void
}

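; The upper half of the mask is zero, so only the low dword needs an s_and_b32;
; the SI-NOT lines check that no other AND is emitted.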
; FUNC-LABEL: {{^}}s_and_32_bit_constant_i64:
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687{{$}}
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_32_bit_constant_i64(ptr addrspace(1) %out, i32, i64 %a) {
  %and = and i64 %a, 1234567
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

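; 62 fits in the inline-immediate range and the upper half of each mask is zero,
; so each 64-bit AND should reduce to a single s_and_b32 with no literal constant.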
; FUNC-LABEL: {{^}}s_and_multi_use_inline_imm_i64:
; SI: s_load_dwordx2
; SI: s_load_dword [[A:s[0-9]+]]
; SI: s_load_dword [[B:s[0-9]+]]
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_lshl_b32 [[C:s[0-9]+]], [[A]], 1
; SI: s_lshl_b32 [[D:s[0-9]+]], [[B]], 1
; SI: s_and_b32 s{{[0-9]+}}, [[C]], 62
; SI: s_and_b32 s{{[0-9]+}}, [[D]], 62
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_multi_use_inline_imm_i64(ptr addrspace(1) %out, i32, i64 %a, i32, i64 %b, i32, i64 %c) {
  %shl.a = shl i64 %a, 1
  %shl.b = shl i64 %b, 1
  %and0 = and i64 %shl.a, 62
  %and1 = and i64 %shl.b, 62
  %add0 = add i64 %and0, %c
  %add1 = add i64 %and1, %c
  store volatile i64 %add0, ptr addrspace(1) %out
  store volatile i64 %add1, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64:
; SI: v_and_b32
; SI: v_and_b32
define amdgpu_kernel void @v_and_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %gep.a = getelementptr i64, ptr addrspace(1) %aptr, i32 %tid
  %a = load i64, ptr addrspace(1) %gep.a, align 8
  %gep.b = getelementptr i64, ptr addrspace(1) %bptr, i32 %tid
  %b = load i64, ptr addrspace(1) %gep.b, align 8
  %and = and i64 %a, %b
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

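; 1231231234567 = 0x0000011eab19b207, so the low and high dwords are ANDed with
; the literals 0xab19b207 and 0x11e respectively.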
; FUNC-LABEL: {{^}}v_and_constant_i64:
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0xab19b207, {{v[0-9]+}}
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0x11e, {{v[0-9]+}}
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_and_constant_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %gep.a = getelementptr i64, ptr addrspace(1) %aptr, i32 %tid
  %a = load i64, ptr addrspace(1) %gep.a, align 8
  %and = and i64 %a, 1231231234567
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_multi_use_constant_i64:
; SI-DAG: buffer_load_dwordx2 v[[[LO0:[0-9]+]]:[[HI0:[0-9]+]]]
; SI-DAG: buffer_load_dwordx2 v[[[LO1:[0-9]+]]:[[HI1:[0-9]+]]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0xab19b207, v[[LO0]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0x11e, v[[HI0]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0xab19b207, v[[LO1]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0x11e, v[[HI1]]
; SI: buffer_store_dwordx2
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_and_multi_use_constant_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
  %a = load volatile i64, ptr addrspace(1) %aptr
  %b = load volatile i64, ptr addrspace(1) %aptr
  %and0 = and i64 %a, 1231231234567
  %and1 = and i64 %b, 1231231234567
  store volatile i64 %and0, ptr addrspace(1) %out
  store volatile i64 %and1, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_multi_use_inline_imm_i64:
; SI: buffer_load_dwordx2 v[[[LO0:[0-9]+]]:[[HI0:[0-9]+]]]
; SI-NOT: and
; SI: buffer_load_dwordx2 v[[[LO1:[0-9]+]]:[[HI1:[0-9]+]]]
; SI-NOT: and
; SI: v_and_b32_e32 v[[RESLO0:[0-9]+]], 63, v[[LO0]]
; SI: v_and_b32_e32 v[[RESLO1:[0-9]+]], 63, v[[LO1]]
; SI-NOT: and
; SI: buffer_store_dwordx2 v[[[RESLO0]]
; SI: buffer_store_dwordx2 v[[[RESLO1]]
define amdgpu_kernel void @v_and_multi_use_inline_imm_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
  %a = load volatile i64, ptr addrspace(1) %aptr
  %b = load volatile i64, ptr addrspace(1) %aptr
  %and0 = and i64 %a, 63
  %and1 = and i64 %b, 63
  store volatile i64 %and0, ptr addrspace(1) %out
  store volatile i64 %and1, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64_32_bit_constant:
; SI: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]]
; SI-NOT: and
; SI: v_and_b32_e32 {{v[0-9]+}}, 0x12d687, [[VAL]]
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_and_i64_32_bit_constant(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %gep.a = getelementptr i64, ptr addrspace(1) %aptr, i32 %tid
  %a = load i64, ptr addrspace(1) %gep.a, align 8
  %and = and i64 %a, 1234567
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_i64:
; SI: {{buffer|flat}}_load_dword v{{[0-9]+}}
; SI-NOT: and
; SI: v_and_b32_e32 {{v[0-9]+}}, 64, {{v[0-9]+}}
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_and_inline_imm_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %gep.a = getelementptr i64, ptr addrspace(1) %aptr, i32 %tid
  %a = load i64, ptr addrspace(1) %gep.a, align 8
  %and = and i64 %a, 64
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

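; -8 = 0xfffffffffffffff8: the high dword of the mask is all ones, so only the
; low dword needs a v_and_b32 with the inline immediate -8.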
; FIXME: Should be able to reduce load width
; FUNC-LABEL: {{^}}v_and_inline_neg_imm_i64:
; SI: {{buffer|flat}}_load_dwordx2 v[[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]]
; SI-NOT: and
; SI: v_and_b32_e32 v[[VAL_LO]], -8, v[[VAL_LO]]
; SI-NOT: and
; SI: buffer_store_dwordx2 v[[[VAL_LO]]:[[VAL_HI]]]
define amdgpu_kernel void @v_and_inline_neg_imm_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %gep.a = getelementptr i64, ptr addrspace(1) %aptr, i32 %tid
  %a = load i64, ptr addrspace(1) %gep.a, align 8
  %and = and i64 %a, -8
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64
; SI: s_load_dword
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 64
; SI-NOT: and
; SI: buffer_store_dword
define amdgpu_kernel void @s_and_inline_imm_64_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, 64
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64_noshrink:
; SI: s_load_dword [[A:s[0-9]+]]
; SI: s_lshl_b32 [[B:s[0-9]+]], [[A]], 1{{$}}
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, [[B]], 64
; SI-NOT: and
; SI: s_add_u32
; SI-NEXT: s_addc_u32
define amdgpu_kernel void @s_and_inline_imm_64_i64_noshrink(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a, i32, i64 %b) {
  %shl = shl i64 %a, 1
  %and = and i64 %shl, 64
  %add = add i64 %and, %b
  store i64 %add, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_1_i64
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_1_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, 1
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

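; 4607182418800017408 = 0x3ff0000000000000 (f64 1.0). The low dword of the mask
; is zero, and 0x3ff00000 is not a 32-bit inline immediate (f32 1.0 would be
; 0x3f800000), so the high dword still needs a literal. The same pattern applies
; to the -1.0, 0.5, and -0.5 tests that follow.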
; FUNC-LABEL: {{^}}s_and_inline_imm_1.0_i64
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1.0

; SI: s_load_dword
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x3ff00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_1.0_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, 4607182418800017408
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_1.0_i64
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -1.0

; SI: s_load_dword
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xbff00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_neg_1.0_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, 13830554455654793216
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_0.5_i64
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0.5

; SI: s_load_dword
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x3fe00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_0.5_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, 4602678819172646912
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_0.5_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -0.5

; SI: s_load_dword
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xbfe00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_neg_0.5_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, 13826050856027422720
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

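; 4611686018427387904 = 0x4000000000000000 (f64 2.0). Its high dword, 0x40000000,
; is also the f32 bit pattern of 2.0, so the s_and_b32 can use the 2.0 inline
; immediate instead of a literal (likewise -2.0 below).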
; FUNC-LABEL: {{^}}s_and_inline_imm_2.0_i64:
; SI: s_load_dword
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 2.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_2.0_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, 4611686018427387904
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_2.0_i64:
; SI: s_load_dword
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, -2.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_neg_2.0_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, 13835058055282163712
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_4.0_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 4.0

; SI: s_load_dword
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x40100000
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_4.0_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, 4616189618054758400
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_4.0_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -4.0

; SI: s_load_dword
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xc0100000
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_neg_4.0_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, 13839561654909534208
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; Test with the 64-bit integer bit pattern for a 32-bit float in the
; low 32-bits, which is not a valid 64-bit inline immediate.

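; 1082130432 = 0x40800000, the f32 bit pattern of 4.0, in the low dword. The
; high half of the mask is zero, so only the low dword is ANDed and the 4.0
; inline immediate can be used.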
; FUNC-LABEL: {{^}}s_and_inline_imm_f32_4.0_i64:
; SI: s_load_dword s
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, 4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_f32_4.0_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, 1082130432
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_f32_neg_4.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, -4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_f32_neg_4.0_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, -1065353216
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}

; Shift into upper 32-bits
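; 4647714815446351872 = 0x4080000000000000, i.e. the f32 4.0 bit pattern in the
; high dword. The low half of the mask is zero, so only the high dword is ANDed,
; again using the 4.0 inline immediate.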
; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_4.0_i64:
; SI: s_load_dword
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, 4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_high_imm_f32_4.0_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
  %and = and i64 %a, 4647714815446351872
  store i64 %and, ptr addrspace(1) %out, align 8
  ret void
}
558
559; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_neg_4.0_i64:
560; SI: s_load_dword
561; SI: s_load_dwordx2
562; SI-NOT: and
563; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, -4.0
564; SI-NOT: and
565; SI: buffer_store_dwordx2
566define amdgpu_kernel void @s_and_inline_high_imm_f32_neg_4.0_i64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, i64 %a) {
567  %and = and i64 %a, 13871086852301127680
568  store i64 %and, ptr addrspace(1) %out, align 8
569  ret void
570}
571attributes #0 = { nounwind readnone }
572