; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -o - %s | FileCheck -check-prefix=GFX6 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -o - %s | FileCheck -check-prefix=GFX8 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -o - %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -o - %s | FileCheck -check-prefix=GFX11 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -o - %s | FileCheck -check-prefix=GFX12 %s

define amdgpu_ps void @image_store_f32(<8 x i32> inreg %rsrc, i32 %s, i32 %t, float %data) {
; GFX6-LABEL: image_store_f32:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_mov_b32 s0, s2
; GFX6-NEXT:    s_mov_b32 s1, s3
; GFX6-NEXT:    s_mov_b32 s2, s4
; GFX6-NEXT:    s_mov_b32 s3, s5
; GFX6-NEXT:    s_mov_b32 s4, s6
; GFX6-NEXT:    s_mov_b32 s5, s7
; GFX6-NEXT:    s_mov_b32 s6, s8
; GFX6-NEXT:    s_mov_b32 s7, s9
; GFX6-NEXT:    image_store v2, v[0:1], s[0:7] dmask:0x1 unorm
; GFX6-NEXT:    s_endpgm
;
; GFX8-LABEL: image_store_f32:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_mov_b32 s0, s2
; GFX8-NEXT:    s_mov_b32 s1, s3
; GFX8-NEXT:    s_mov_b32 s2, s4
; GFX8-NEXT:    s_mov_b32 s3, s5
; GFX8-NEXT:    s_mov_b32 s4, s6
; GFX8-NEXT:    s_mov_b32 s5, s7
; GFX8-NEXT:    s_mov_b32 s6, s8
; GFX8-NEXT:    s_mov_b32 s7, s9
; GFX8-NEXT:    image_store v2, v[0:1], s[0:7] dmask:0x1 unorm
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: image_store_f32:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_mov_b32 s0, s2
; GFX10-NEXT:    s_mov_b32 s1, s3
; GFX10-NEXT:    s_mov_b32 s2, s4
; GFX10-NEXT:    s_mov_b32 s3, s5
; GFX10-NEXT:    s_mov_b32 s4, s6
; GFX10-NEXT:    s_mov_b32 s5, s7
; GFX10-NEXT:    s_mov_b32 s6, s8
; GFX10-NEXT:    s_mov_b32 s7, s9
; GFX10-NEXT:    image_store v2, v[0:1], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT:    s_endpgm
;
; GFX11-LABEL: image_store_f32:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_mov_b32 s0, s2
; GFX11-NEXT:    s_mov_b32 s1, s3
; GFX11-NEXT:    s_mov_b32 s2, s4
; GFX11-NEXT:    s_mov_b32 s3, s5
; GFX11-NEXT:    s_mov_b32 s4, s6
; GFX11-NEXT:    s_mov_b32 s5, s7
; GFX11-NEXT:    s_mov_b32 s6, s8
; GFX11-NEXT:    s_mov_b32 s7, s9
; GFX11-NEXT:    image_store v2, v[0:1], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D unorm
; GFX11-NEXT:    s_endpgm
;
; GFX12-LABEL: image_store_f32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_mov_b32 s0, s2
; GFX12-NEXT:    s_mov_b32 s1, s3
; GFX12-NEXT:    s_mov_b32 s2, s4
; GFX12-NEXT:    s_mov_b32 s3, s5
; GFX12-NEXT:    s_mov_b32 s4, s6
; GFX12-NEXT:    s_mov_b32 s5, s7
; GFX12-NEXT:    s_mov_b32 s6, s8
; GFX12-NEXT:    s_mov_b32 s7, s9
; GFX12-NEXT:    image_store v2, [v0, v1], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GFX12-NEXT:    s_endpgm
  call void @llvm.amdgcn.image.store.2d.f32.i32(float %data, i32 1, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

define amdgpu_ps void @image_store_v2f32(<8 x i32> inreg %rsrc, i32 %s, i32 %t, <2 x float> %in) {
; GFX6-LABEL: image_store_v2f32:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_mov_b32 s0, s2
; GFX6-NEXT:    s_mov_b32 s1, s3
; GFX6-NEXT:    s_mov_b32 s2, s4
; GFX6-NEXT:    s_mov_b32 s3, s5
; GFX6-NEXT:    s_mov_b32 s4, s6
; GFX6-NEXT:    s_mov_b32 s5, s7
; GFX6-NEXT:    s_mov_b32 s6, s8
; GFX6-NEXT:    s_mov_b32 s7, s9
; GFX6-NEXT:    image_store v[2:3], v[0:1], s[0:7] dmask:0x3 unorm
; GFX6-NEXT:    s_endpgm
;
; GFX8-LABEL: image_store_v2f32:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_mov_b32 s0, s2
; GFX8-NEXT:    s_mov_b32 s1, s3
; GFX8-NEXT:    s_mov_b32 s2, s4
; GFX8-NEXT:    s_mov_b32 s3, s5
; GFX8-NEXT:    s_mov_b32 s4, s6
; GFX8-NEXT:    s_mov_b32 s5, s7
; GFX8-NEXT:    s_mov_b32 s6, s8
; GFX8-NEXT:    s_mov_b32 s7, s9
; GFX8-NEXT:    image_store v[2:3], v[0:1], s[0:7] dmask:0x3 unorm
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: image_store_v2f32:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_mov_b32 s0, s2
; GFX10-NEXT:    s_mov_b32 s1, s3
; GFX10-NEXT:    s_mov_b32 s2, s4
; GFX10-NEXT:    s_mov_b32 s3, s5
; GFX10-NEXT:    s_mov_b32 s4, s6
; GFX10-NEXT:    s_mov_b32 s5, s7
; GFX10-NEXT:    s_mov_b32 s6, s8
; GFX10-NEXT:    s_mov_b32 s7, s9
; GFX10-NEXT:    image_store v[2:3], v[0:1], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT:    s_endpgm
;
; GFX11-LABEL: image_store_v2f32:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_mov_b32 s0, s2
; GFX11-NEXT:    s_mov_b32 s1, s3
; GFX11-NEXT:    s_mov_b32 s2, s4
; GFX11-NEXT:    s_mov_b32 s3, s5
; GFX11-NEXT:    s_mov_b32 s4, s6
; GFX11-NEXT:    s_mov_b32 s5, s7
; GFX11-NEXT:    s_mov_b32 s6, s8
; GFX11-NEXT:    s_mov_b32 s7, s9
; GFX11-NEXT:    image_store v[2:3], v[0:1], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_2D unorm
; GFX11-NEXT:    s_endpgm
;
; GFX12-LABEL: image_store_v2f32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_mov_b32 s0, s2
; GFX12-NEXT:    s_mov_b32 s1, s3
; GFX12-NEXT:    s_mov_b32 s2, s4
; GFX12-NEXT:    s_mov_b32 s3, s5
; GFX12-NEXT:    s_mov_b32 s4, s6
; GFX12-NEXT:    s_mov_b32 s5, s7
; GFX12-NEXT:    s_mov_b32 s6, s8
; GFX12-NEXT:    s_mov_b32 s7, s9
; GFX12-NEXT:    image_store v[2:3], [v0, v1], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_2D
; GFX12-NEXT:    s_endpgm
  call void @llvm.amdgcn.image.store.2d.v2f32.i32(<2 x float> %in, i32 3, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

define amdgpu_ps void @image_store_v3f32(<8 x i32> inreg %rsrc, i32 %s, i32 %t, <3 x float> %in) {
; GFX6-LABEL: image_store_v3f32:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_mov_b32 s0, s2
; GFX6-NEXT:    s_mov_b32 s1, s3
; GFX6-NEXT:    s_mov_b32 s2, s4
; GFX6-NEXT:    s_mov_b32 s3, s5
; GFX6-NEXT:    s_mov_b32 s4, s6
; GFX6-NEXT:    s_mov_b32 s5, s7
; GFX6-NEXT:    s_mov_b32 s6, s8
; GFX6-NEXT:    s_mov_b32 s7, s9
; GFX6-NEXT:    image_store v[2:4], v[0:1], s[0:7] dmask:0x7 unorm
; GFX6-NEXT:    s_endpgm
;
; GFX8-LABEL: image_store_v3f32:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_mov_b32 s0, s2
; GFX8-NEXT:    s_mov_b32 s1, s3
; GFX8-NEXT:    s_mov_b32 s2, s4
; GFX8-NEXT:    s_mov_b32 s3, s5
; GFX8-NEXT:    s_mov_b32 s4, s6
; GFX8-NEXT:    s_mov_b32 s5, s7
; GFX8-NEXT:    s_mov_b32 s6, s8
; GFX8-NEXT:    s_mov_b32 s7, s9
; GFX8-NEXT:    image_store v[2:4], v[0:1], s[0:7] dmask:0x7 unorm
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: image_store_v3f32:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_mov_b32 s0, s2
; GFX10-NEXT:    s_mov_b32 s1, s3
; GFX10-NEXT:    s_mov_b32 s2, s4
; GFX10-NEXT:    s_mov_b32 s3, s5
; GFX10-NEXT:    s_mov_b32 s4, s6
; GFX10-NEXT:    s_mov_b32 s5, s7
; GFX10-NEXT:    s_mov_b32 s6, s8
; GFX10-NEXT:    s_mov_b32 s7, s9
; GFX10-NEXT:    image_store v[2:4], v[0:1], s[0:7] dmask:0x7 dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT:    s_endpgm
;
; GFX11-LABEL: image_store_v3f32:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_mov_b32 s0, s2
; GFX11-NEXT:    s_mov_b32 s1, s3
; GFX11-NEXT:    s_mov_b32 s2, s4
; GFX11-NEXT:    s_mov_b32 s3, s5
; GFX11-NEXT:    s_mov_b32 s4, s6
; GFX11-NEXT:    s_mov_b32 s5, s7
; GFX11-NEXT:    s_mov_b32 s6, s8
; GFX11-NEXT:    s_mov_b32 s7, s9
; GFX11-NEXT:    image_store v[2:4], v[0:1], s[0:7] dmask:0x7 dim:SQ_RSRC_IMG_2D unorm
; GFX11-NEXT:    s_endpgm
;
; GFX12-LABEL: image_store_v3f32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_mov_b32 s0, s2
; GFX12-NEXT:    s_mov_b32 s1, s3
; GFX12-NEXT:    s_mov_b32 s2, s4
; GFX12-NEXT:    s_mov_b32 s3, s5
; GFX12-NEXT:    s_mov_b32 s4, s6
; GFX12-NEXT:    s_mov_b32 s5, s7
; GFX12-NEXT:    s_mov_b32 s6, s8
; GFX12-NEXT:    s_mov_b32 s7, s9
; GFX12-NEXT:    image_store v[2:4], [v0, v1], s[0:7] dmask:0x7 dim:SQ_RSRC_IMG_2D
; GFX12-NEXT:    s_endpgm
  call void @llvm.amdgcn.image.store.2d.v3f32.i32(<3 x float> %in, i32 7, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

define amdgpu_ps void @image_store_v4f32(<8 x i32> inreg %rsrc, i32 %s, i32 %t, <4 x float> %in) {
; GFX6-LABEL: image_store_v4f32:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_mov_b32 s0, s2
; GFX6-NEXT:    s_mov_b32 s1, s3
; GFX6-NEXT:    s_mov_b32 s2, s4
; GFX6-NEXT:    s_mov_b32 s3, s5
; GFX6-NEXT:    s_mov_b32 s4, s6
; GFX6-NEXT:    s_mov_b32 s5, s7
; GFX6-NEXT:    s_mov_b32 s6, s8
; GFX6-NEXT:    s_mov_b32 s7, s9
; GFX6-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0xf unorm
; GFX6-NEXT:    s_endpgm
;
; GFX8-LABEL: image_store_v4f32:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_mov_b32 s0, s2
; GFX8-NEXT:    s_mov_b32 s1, s3
; GFX8-NEXT:    s_mov_b32 s2, s4
; GFX8-NEXT:    s_mov_b32 s3, s5
; GFX8-NEXT:    s_mov_b32 s4, s6
; GFX8-NEXT:    s_mov_b32 s5, s7
; GFX8-NEXT:    s_mov_b32 s6, s8
; GFX8-NEXT:    s_mov_b32 s7, s9
; GFX8-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0xf unorm
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: image_store_v4f32:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_mov_b32 s0, s2
; GFX10-NEXT:    s_mov_b32 s1, s3
; GFX10-NEXT:    s_mov_b32 s2, s4
; GFX10-NEXT:    s_mov_b32 s3, s5
; GFX10-NEXT:    s_mov_b32 s4, s6
; GFX10-NEXT:    s_mov_b32 s5, s7
; GFX10-NEXT:    s_mov_b32 s6, s8
; GFX10-NEXT:    s_mov_b32 s7, s9
; GFX10-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT:    s_endpgm
;
; GFX11-LABEL: image_store_v4f32:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_mov_b32 s0, s2
; GFX11-NEXT:    s_mov_b32 s1, s3
; GFX11-NEXT:    s_mov_b32 s2, s4
; GFX11-NEXT:    s_mov_b32 s3, s5
; GFX11-NEXT:    s_mov_b32 s4, s6
; GFX11-NEXT:    s_mov_b32 s5, s7
; GFX11-NEXT:    s_mov_b32 s6, s8
; GFX11-NEXT:    s_mov_b32 s7, s9
; GFX11-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D unorm
; GFX11-NEXT:    s_endpgm
;
; GFX12-LABEL: image_store_v4f32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_mov_b32 s0, s2
; GFX12-NEXT:    s_mov_b32 s1, s3
; GFX12-NEXT:    s_mov_b32 s2, s4
; GFX12-NEXT:    s_mov_b32 s3, s5
; GFX12-NEXT:    s_mov_b32 s4, s6
; GFX12-NEXT:    s_mov_b32 s5, s7
; GFX12-NEXT:    s_mov_b32 s6, s8
; GFX12-NEXT:    s_mov_b32 s7, s9
; GFX12-NEXT:    image_store v[2:5], [v0, v1], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D
; GFX12-NEXT:    s_endpgm
  call void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float> %in, i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

define amdgpu_ps void @image_store_v4f32_dmask_0001(<8 x i32> inreg %rsrc, i32 %s, i32 %t, <4 x float> %in) {
; GFX6-LABEL: image_store_v4f32_dmask_0001:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_mov_b32 s0, s2
; GFX6-NEXT:    s_mov_b32 s1, s3
; GFX6-NEXT:    s_mov_b32 s2, s4
; GFX6-NEXT:    s_mov_b32 s3, s5
; GFX6-NEXT:    s_mov_b32 s4, s6
; GFX6-NEXT:    s_mov_b32 s5, s7
; GFX6-NEXT:    s_mov_b32 s6, s8
; GFX6-NEXT:    s_mov_b32 s7, s9
; GFX6-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x1 unorm
; GFX6-NEXT:    s_endpgm
;
; GFX8-LABEL: image_store_v4f32_dmask_0001:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_mov_b32 s0, s2
; GFX8-NEXT:    s_mov_b32 s1, s3
; GFX8-NEXT:    s_mov_b32 s2, s4
; GFX8-NEXT:    s_mov_b32 s3, s5
; GFX8-NEXT:    s_mov_b32 s4, s6
; GFX8-NEXT:    s_mov_b32 s5, s7
; GFX8-NEXT:    s_mov_b32 s6, s8
; GFX8-NEXT:    s_mov_b32 s7, s9
; GFX8-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x1 unorm
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: image_store_v4f32_dmask_0001:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_mov_b32 s0, s2
; GFX10-NEXT:    s_mov_b32 s1, s3
; GFX10-NEXT:    s_mov_b32 s2, s4
; GFX10-NEXT:    s_mov_b32 s3, s5
; GFX10-NEXT:    s_mov_b32 s4, s6
; GFX10-NEXT:    s_mov_b32 s5, s7
; GFX10-NEXT:    s_mov_b32 s6, s8
; GFX10-NEXT:    s_mov_b32 s7, s9
; GFX10-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT:    s_endpgm
;
; GFX11-LABEL: image_store_v4f32_dmask_0001:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_mov_b32 s0, s2
; GFX11-NEXT:    s_mov_b32 s1, s3
; GFX11-NEXT:    s_mov_b32 s2, s4
; GFX11-NEXT:    s_mov_b32 s3, s5
; GFX11-NEXT:    s_mov_b32 s4, s6
; GFX11-NEXT:    s_mov_b32 s5, s7
; GFX11-NEXT:    s_mov_b32 s6, s8
; GFX11-NEXT:    s_mov_b32 s7, s9
; GFX11-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D unorm
; GFX11-NEXT:    s_endpgm
;
; GFX12-LABEL: image_store_v4f32_dmask_0001:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_mov_b32 s0, s2
; GFX12-NEXT:    s_mov_b32 s1, s3
; GFX12-NEXT:    s_mov_b32 s2, s4
; GFX12-NEXT:    s_mov_b32 s3, s5
; GFX12-NEXT:    s_mov_b32 s4, s6
; GFX12-NEXT:    s_mov_b32 s5, s7
; GFX12-NEXT:    s_mov_b32 s6, s8
; GFX12-NEXT:    s_mov_b32 s7, s9
; GFX12-NEXT:    image_store v[2:5], [v0, v1], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GFX12-NEXT:    s_endpgm
  call void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float> %in, i32 1, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

define amdgpu_ps void @image_store_v4f32_dmask_0010(<8 x i32> inreg %rsrc, i32 %s, i32 %t, <4 x float> %in) {
; GFX6-LABEL: image_store_v4f32_dmask_0010:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_mov_b32 s0, s2
; GFX6-NEXT:    s_mov_b32 s1, s3
; GFX6-NEXT:    s_mov_b32 s2, s4
; GFX6-NEXT:    s_mov_b32 s3, s5
; GFX6-NEXT:    s_mov_b32 s4, s6
; GFX6-NEXT:    s_mov_b32 s5, s7
; GFX6-NEXT:    s_mov_b32 s6, s8
; GFX6-NEXT:    s_mov_b32 s7, s9
; GFX6-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x2 unorm
; GFX6-NEXT:    s_endpgm
;
; GFX8-LABEL: image_store_v4f32_dmask_0010:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_mov_b32 s0, s2
; GFX8-NEXT:    s_mov_b32 s1, s3
; GFX8-NEXT:    s_mov_b32 s2, s4
; GFX8-NEXT:    s_mov_b32 s3, s5
; GFX8-NEXT:    s_mov_b32 s4, s6
; GFX8-NEXT:    s_mov_b32 s5, s7
; GFX8-NEXT:    s_mov_b32 s6, s8
; GFX8-NEXT:    s_mov_b32 s7, s9
; GFX8-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x2 unorm
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: image_store_v4f32_dmask_0010:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_mov_b32 s0, s2
; GFX10-NEXT:    s_mov_b32 s1, s3
; GFX10-NEXT:    s_mov_b32 s2, s4
; GFX10-NEXT:    s_mov_b32 s3, s5
; GFX10-NEXT:    s_mov_b32 s4, s6
; GFX10-NEXT:    s_mov_b32 s5, s7
; GFX10-NEXT:    s_mov_b32 s6, s8
; GFX10-NEXT:    s_mov_b32 s7, s9
; GFX10-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT:    s_endpgm
;
; GFX11-LABEL: image_store_v4f32_dmask_0010:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_mov_b32 s0, s2
; GFX11-NEXT:    s_mov_b32 s1, s3
; GFX11-NEXT:    s_mov_b32 s2, s4
; GFX11-NEXT:    s_mov_b32 s3, s5
; GFX11-NEXT:    s_mov_b32 s4, s6
; GFX11-NEXT:    s_mov_b32 s5, s7
; GFX11-NEXT:    s_mov_b32 s6, s8
; GFX11-NEXT:    s_mov_b32 s7, s9
; GFX11-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D unorm
; GFX11-NEXT:    s_endpgm
;
; GFX12-LABEL: image_store_v4f32_dmask_0010:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_mov_b32 s0, s2
; GFX12-NEXT:    s_mov_b32 s1, s3
; GFX12-NEXT:    s_mov_b32 s2, s4
; GFX12-NEXT:    s_mov_b32 s3, s5
; GFX12-NEXT:    s_mov_b32 s4, s6
; GFX12-NEXT:    s_mov_b32 s5, s7
; GFX12-NEXT:    s_mov_b32 s6, s8
; GFX12-NEXT:    s_mov_b32 s7, s9
; GFX12-NEXT:    image_store v[2:5], [v0, v1], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D
; GFX12-NEXT:    s_endpgm
  call void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float> %in, i32 2, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

define amdgpu_ps void @image_store_v4f32_dmask_0100(<8 x i32> inreg %rsrc, i32 %s, i32 %t, <4 x float> %in) {
; GFX6-LABEL: image_store_v4f32_dmask_0100:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_mov_b32 s0, s2
; GFX6-NEXT:    s_mov_b32 s1, s3
; GFX6-NEXT:    s_mov_b32 s2, s4
; GFX6-NEXT:    s_mov_b32 s3, s5
; GFX6-NEXT:    s_mov_b32 s4, s6
; GFX6-NEXT:    s_mov_b32 s5, s7
; GFX6-NEXT:    s_mov_b32 s6, s8
; GFX6-NEXT:    s_mov_b32 s7, s9
; GFX6-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x4 unorm
; GFX6-NEXT:    s_endpgm
;
; GFX8-LABEL: image_store_v4f32_dmask_0100:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_mov_b32 s0, s2
; GFX8-NEXT:    s_mov_b32 s1, s3
; GFX8-NEXT:    s_mov_b32 s2, s4
; GFX8-NEXT:    s_mov_b32 s3, s5
; GFX8-NEXT:    s_mov_b32 s4, s6
; GFX8-NEXT:    s_mov_b32 s5, s7
; GFX8-NEXT:    s_mov_b32 s6, s8
; GFX8-NEXT:    s_mov_b32 s7, s9
; GFX8-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x4 unorm
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: image_store_v4f32_dmask_0100:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_mov_b32 s0, s2
; GFX10-NEXT:    s_mov_b32 s1, s3
; GFX10-NEXT:    s_mov_b32 s2, s4
; GFX10-NEXT:    s_mov_b32 s3, s5
; GFX10-NEXT:    s_mov_b32 s4, s6
; GFX10-NEXT:    s_mov_b32 s5, s7
; GFX10-NEXT:    s_mov_b32 s6, s8
; GFX10-NEXT:    s_mov_b32 s7, s9
; GFX10-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x4 dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT:    s_endpgm
;
; GFX11-LABEL: image_store_v4f32_dmask_0100:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_mov_b32 s0, s2
; GFX11-NEXT:    s_mov_b32 s1, s3
; GFX11-NEXT:    s_mov_b32 s2, s4
; GFX11-NEXT:    s_mov_b32 s3, s5
; GFX11-NEXT:    s_mov_b32 s4, s6
; GFX11-NEXT:    s_mov_b32 s5, s7
; GFX11-NEXT:    s_mov_b32 s6, s8
; GFX11-NEXT:    s_mov_b32 s7, s9
; GFX11-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x4 dim:SQ_RSRC_IMG_2D unorm
; GFX11-NEXT:    s_endpgm
;
; GFX12-LABEL: image_store_v4f32_dmask_0100:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_mov_b32 s0, s2
; GFX12-NEXT:    s_mov_b32 s1, s3
; GFX12-NEXT:    s_mov_b32 s2, s4
; GFX12-NEXT:    s_mov_b32 s3, s5
; GFX12-NEXT:    s_mov_b32 s4, s6
; GFX12-NEXT:    s_mov_b32 s5, s7
; GFX12-NEXT:    s_mov_b32 s6, s8
; GFX12-NEXT:    s_mov_b32 s7, s9
; GFX12-NEXT:    image_store v[2:5], [v0, v1], s[0:7] dmask:0x4 dim:SQ_RSRC_IMG_2D
; GFX12-NEXT:    s_endpgm
  call void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float> %in, i32 4, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

define amdgpu_ps void @image_store_v4f32_dmask_1000(<8 x i32> inreg %rsrc, i32 %s, i32 %t, <4 x float> %in) {
; GFX6-LABEL: image_store_v4f32_dmask_1000:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_mov_b32 s0, s2
; GFX6-NEXT:    s_mov_b32 s1, s3
; GFX6-NEXT:    s_mov_b32 s2, s4
; GFX6-NEXT:    s_mov_b32 s3, s5
; GFX6-NEXT:    s_mov_b32 s4, s6
; GFX6-NEXT:    s_mov_b32 s5, s7
; GFX6-NEXT:    s_mov_b32 s6, s8
; GFX6-NEXT:    s_mov_b32 s7, s9
; GFX6-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x8 unorm
; GFX6-NEXT:    s_endpgm
;
; GFX8-LABEL: image_store_v4f32_dmask_1000:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_mov_b32 s0, s2
; GFX8-NEXT:    s_mov_b32 s1, s3
; GFX8-NEXT:    s_mov_b32 s2, s4
; GFX8-NEXT:    s_mov_b32 s3, s5
; GFX8-NEXT:    s_mov_b32 s4, s6
; GFX8-NEXT:    s_mov_b32 s5, s7
; GFX8-NEXT:    s_mov_b32 s6, s8
; GFX8-NEXT:    s_mov_b32 s7, s9
; GFX8-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x8 unorm
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: image_store_v4f32_dmask_1000:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_mov_b32 s0, s2
; GFX10-NEXT:    s_mov_b32 s1, s3
; GFX10-NEXT:    s_mov_b32 s2, s4
; GFX10-NEXT:    s_mov_b32 s3, s5
; GFX10-NEXT:    s_mov_b32 s4, s6
; GFX10-NEXT:    s_mov_b32 s5, s7
; GFX10-NEXT:    s_mov_b32 s6, s8
; GFX10-NEXT:    s_mov_b32 s7, s9
; GFX10-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT:    s_endpgm
;
; GFX11-LABEL: image_store_v4f32_dmask_1000:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_mov_b32 s0, s2
; GFX11-NEXT:    s_mov_b32 s1, s3
; GFX11-NEXT:    s_mov_b32 s2, s4
; GFX11-NEXT:    s_mov_b32 s3, s5
; GFX11-NEXT:    s_mov_b32 s4, s6
; GFX11-NEXT:    s_mov_b32 s5, s7
; GFX11-NEXT:    s_mov_b32 s6, s8
; GFX11-NEXT:    s_mov_b32 s7, s9
; GFX11-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D unorm
; GFX11-NEXT:    s_endpgm
;
; GFX12-LABEL: image_store_v4f32_dmask_1000:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_mov_b32 s0, s2
; GFX12-NEXT:    s_mov_b32 s1, s3
; GFX12-NEXT:    s_mov_b32 s2, s4
; GFX12-NEXT:    s_mov_b32 s3, s5
; GFX12-NEXT:    s_mov_b32 s4, s6
; GFX12-NEXT:    s_mov_b32 s5, s7
; GFX12-NEXT:    s_mov_b32 s6, s8
; GFX12-NEXT:    s_mov_b32 s7, s9
; GFX12-NEXT:    image_store v[2:5], [v0, v1], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D
; GFX12-NEXT:    s_endpgm
  call void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float> %in, i32 8, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

define amdgpu_ps void @image_store_v4f32_dmask_0011(<8 x i32> inreg %rsrc, i32 %s, i32 %t, <4 x float> %in) {
; GFX6-LABEL: image_store_v4f32_dmask_0011:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_mov_b32 s0, s2
; GFX6-NEXT:    s_mov_b32 s1, s3
; GFX6-NEXT:    s_mov_b32 s2, s4
; GFX6-NEXT:    s_mov_b32 s3, s5
; GFX6-NEXT:    s_mov_b32 s4, s6
; GFX6-NEXT:    s_mov_b32 s5, s7
; GFX6-NEXT:    s_mov_b32 s6, s8
; GFX6-NEXT:    s_mov_b32 s7, s9
; GFX6-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x3 unorm
; GFX6-NEXT:    s_endpgm
;
; GFX8-LABEL: image_store_v4f32_dmask_0011:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_mov_b32 s0, s2
; GFX8-NEXT:    s_mov_b32 s1, s3
; GFX8-NEXT:    s_mov_b32 s2, s4
; GFX8-NEXT:    s_mov_b32 s3, s5
; GFX8-NEXT:    s_mov_b32 s4, s6
; GFX8-NEXT:    s_mov_b32 s5, s7
; GFX8-NEXT:    s_mov_b32 s6, s8
; GFX8-NEXT:    s_mov_b32 s7, s9
; GFX8-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x3 unorm
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: image_store_v4f32_dmask_0011:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_mov_b32 s0, s2
; GFX10-NEXT:    s_mov_b32 s1, s3
; GFX10-NEXT:    s_mov_b32 s2, s4
; GFX10-NEXT:    s_mov_b32 s3, s5
; GFX10-NEXT:    s_mov_b32 s4, s6
; GFX10-NEXT:    s_mov_b32 s5, s7
; GFX10-NEXT:    s_mov_b32 s6, s8
; GFX10-NEXT:    s_mov_b32 s7, s9
; GFX10-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT:    s_endpgm
;
; GFX11-LABEL: image_store_v4f32_dmask_0011:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_mov_b32 s0, s2
; GFX11-NEXT:    s_mov_b32 s1, s3
; GFX11-NEXT:    s_mov_b32 s2, s4
; GFX11-NEXT:    s_mov_b32 s3, s5
; GFX11-NEXT:    s_mov_b32 s4, s6
; GFX11-NEXT:    s_mov_b32 s5, s7
; GFX11-NEXT:    s_mov_b32 s6, s8
; GFX11-NEXT:    s_mov_b32 s7, s9
; GFX11-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_2D unorm
; GFX11-NEXT:    s_endpgm
;
; GFX12-LABEL: image_store_v4f32_dmask_0011:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_mov_b32 s0, s2
; GFX12-NEXT:    s_mov_b32 s1, s3
; GFX12-NEXT:    s_mov_b32 s2, s4
; GFX12-NEXT:    s_mov_b32 s3, s5
; GFX12-NEXT:    s_mov_b32 s4, s6
; GFX12-NEXT:    s_mov_b32 s5, s7
; GFX12-NEXT:    s_mov_b32 s6, s8
; GFX12-NEXT:    s_mov_b32 s7, s9
; GFX12-NEXT:    image_store v[2:5], [v0, v1], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_2D
; GFX12-NEXT:    s_endpgm
  call void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float> %in, i32 3, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

define amdgpu_ps void @image_store_v4f32_dmask_0110(<8 x i32> inreg %rsrc, i32 %s, i32 %t, <4 x float> %in) {
; GFX6-LABEL: image_store_v4f32_dmask_0110:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_mov_b32 s0, s2
; GFX6-NEXT:    s_mov_b32 s1, s3
; GFX6-NEXT:    s_mov_b32 s2, s4
; GFX6-NEXT:    s_mov_b32 s3, s5
; GFX6-NEXT:    s_mov_b32 s4, s6
; GFX6-NEXT:    s_mov_b32 s5, s7
; GFX6-NEXT:    s_mov_b32 s6, s8
; GFX6-NEXT:    s_mov_b32 s7, s9
; GFX6-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x6 unorm
; GFX6-NEXT:    s_endpgm
;
; GFX8-LABEL: image_store_v4f32_dmask_0110:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_mov_b32 s0, s2
; GFX8-NEXT:    s_mov_b32 s1, s3
; GFX8-NEXT:    s_mov_b32 s2, s4
; GFX8-NEXT:    s_mov_b32 s3, s5
; GFX8-NEXT:    s_mov_b32 s4, s6
; GFX8-NEXT:    s_mov_b32 s5, s7
; GFX8-NEXT:    s_mov_b32 s6, s8
; GFX8-NEXT:    s_mov_b32 s7, s9
; GFX8-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x6 unorm
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: image_store_v4f32_dmask_0110:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_mov_b32 s0, s2
; GFX10-NEXT:    s_mov_b32 s1, s3
; GFX10-NEXT:    s_mov_b32 s2, s4
; GFX10-NEXT:    s_mov_b32 s3, s5
; GFX10-NEXT:    s_mov_b32 s4, s6
; GFX10-NEXT:    s_mov_b32 s5, s7
; GFX10-NEXT:    s_mov_b32 s6, s8
; GFX10-NEXT:    s_mov_b32 s7, s9
; GFX10-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x6 dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT:    s_endpgm
;
; GFX11-LABEL: image_store_v4f32_dmask_0110:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_mov_b32 s0, s2
; GFX11-NEXT:    s_mov_b32 s1, s3
; GFX11-NEXT:    s_mov_b32 s2, s4
; GFX11-NEXT:    s_mov_b32 s3, s5
; GFX11-NEXT:    s_mov_b32 s4, s6
; GFX11-NEXT:    s_mov_b32 s5, s7
; GFX11-NEXT:    s_mov_b32 s6, s8
; GFX11-NEXT:    s_mov_b32 s7, s9
; GFX11-NEXT:    image_store v[2:5], v[0:1], s[0:7] dmask:0x6 dim:SQ_RSRC_IMG_2D unorm
; GFX11-NEXT:    s_endpgm
;
; GFX12-LABEL: image_store_v4f32_dmask_0110:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_mov_b32 s0, s2
; GFX12-NEXT:    s_mov_b32 s1, s3
; GFX12-NEXT:    s_mov_b32 s2, s4
; GFX12-NEXT:    s_mov_b32 s3, s5
; GFX12-NEXT:    s_mov_b32 s4, s6
; GFX12-NEXT:    s_mov_b32 s5, s7
; GFX12-NEXT:    s_mov_b32 s6, s8
; GFX12-NEXT:    s_mov_b32 s7, s9
; GFX12-NEXT:    image_store v[2:5], [v0, v1], s[0:7] dmask:0x6 dim:SQ_RSRC_IMG_2D
; GFX12-NEXT:    s_endpgm
  call void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float> %in, i32 6, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

define amdgpu_ps void @image_store_f32_dmask_1111(<8 x i32> inreg %rsrc, i32 inreg %s, i32 inreg %t, float %in) #0 {
; GFX6-LABEL: image_store_f32_dmask_1111:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    v_mov_b32_e32 v1, s10
; GFX6-NEXT:    s_mov_b32 s0, s2
; GFX6-NEXT:    s_mov_b32 s1, s3
; GFX6-NEXT:    s_mov_b32 s2, s4
; GFX6-NEXT:    s_mov_b32 s3, s5
; GFX6-NEXT:    s_mov_b32 s4, s6
; GFX6-NEXT:    s_mov_b32 s5, s7
; GFX6-NEXT:    s_mov_b32 s6, s8
; GFX6-NEXT:    s_mov_b32 s7, s9
; GFX6-NEXT:    v_mov_b32_e32 v2, s11
; GFX6-NEXT:    image_store v0, v[1:2], s[0:7] dmask:0xf unorm
; GFX6-NEXT:    s_endpgm
;
; GFX8-LABEL: image_store_f32_dmask_1111:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    v_mov_b32_e32 v1, s10
; GFX8-NEXT:    s_mov_b32 s0, s2
; GFX8-NEXT:    s_mov_b32 s1, s3
; GFX8-NEXT:    s_mov_b32 s2, s4
; GFX8-NEXT:    s_mov_b32 s3, s5
; GFX8-NEXT:    s_mov_b32 s4, s6
; GFX8-NEXT:    s_mov_b32 s5, s7
; GFX8-NEXT:    s_mov_b32 s6, s8
; GFX8-NEXT:    s_mov_b32 s7, s9
; GFX8-NEXT:    v_mov_b32_e32 v2, s11
; GFX8-NEXT:    image_store v0, v[1:2], s[0:7] dmask:0xf unorm
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: image_store_f32_dmask_1111:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    v_mov_b32_e32 v1, s10
; GFX10-NEXT:    v_mov_b32_e32 v2, s11
; GFX10-NEXT:    s_mov_b32 s0, s2
; GFX10-NEXT:    s_mov_b32 s1, s3
; GFX10-NEXT:    s_mov_b32 s2, s4
; GFX10-NEXT:    s_mov_b32 s3, s5
; GFX10-NEXT:    s_mov_b32 s4, s6
; GFX10-NEXT:    s_mov_b32 s5, s7
; GFX10-NEXT:    s_mov_b32 s6, s8
; GFX10-NEXT:    s_mov_b32 s7, s9
; GFX10-NEXT:    image_store v0, v[1:2], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT:    s_endpgm
;
; GFX11-LABEL: image_store_f32_dmask_1111:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    v_dual_mov_b32 v1, s10 :: v_dual_mov_b32 v2, s11
; GFX11-NEXT:    s_mov_b32 s0, s2
; GFX11-NEXT:    s_mov_b32 s1, s3
; GFX11-NEXT:    s_mov_b32 s2, s4
; GFX11-NEXT:    s_mov_b32 s3, s5
; GFX11-NEXT:    s_mov_b32 s4, s6
; GFX11-NEXT:    s_mov_b32 s5, s7
; GFX11-NEXT:    s_mov_b32 s6, s8
; GFX11-NEXT:    s_mov_b32 s7, s9
; GFX11-NEXT:    image_store v0, v[1:2], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D unorm
; GFX11-NEXT:    s_endpgm
;
; GFX12-LABEL: image_store_f32_dmask_1111:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    v_dual_mov_b32 v1, s10 :: v_dual_mov_b32 v2, s11
; GFX12-NEXT:    s_mov_b32 s0, s2
; GFX12-NEXT:    s_mov_b32 s1, s3
; GFX12-NEXT:    s_mov_b32 s2, s4
; GFX12-NEXT:    s_mov_b32 s3, s5
; GFX12-NEXT:    s_mov_b32 s4, s6
; GFX12-NEXT:    s_mov_b32 s5, s7
; GFX12-NEXT:    s_mov_b32 s6, s8
; GFX12-NEXT:    s_mov_b32 s7, s9
; GFX12-NEXT:    image_store v0, [v1, v2], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D
; GFX12-NEXT:    s_endpgm
  tail call void @llvm.amdgcn.image.store.2d.f32.i32(float %in, i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
  ret void
}

declare void @llvm.amdgcn.image.store.2d.f32.i32(float, i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
declare void @llvm.amdgcn.image.store.2d.v2f32.i32(<2 x float>, i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
declare void @llvm.amdgcn.image.store.2d.v3f32.i32(<3 x float>, i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
declare void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float>, i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0

attributes #0 = { nounwind writeonly }