; xref: /llvm-project/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll (revision 11b040192640ef3b1f481124c440f464ed6ec86a)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 --verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -O0 --verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN-O0 %s

; Test whole-wave register spilling.

; In this testcase, the return address registers, PC value (SGPR30_SGPR31) and the scratch SGPR used in
; the inline asm statements should be preserved across the call. Since the test limits the VGPR numbers,
; the PC will be spilled to the only available CSR VGPR (VGPR40) as we spill CSR SGPRs including the PC
; directly to the physical VGPR lane to correctly generate the CFIs. The SGPR20 will get spilled to the
; virtual VGPR lane and that would be allocated by regalloc. Since there is no free VGPR to allocate, RA
; must spill a scratch VGPR. The writelane/readlane instructions that spill/restore SGPRs into/from VGPR
; are whole-wave operations and hence the VGPRs involved in such operations require whole-wave spilling.

define void @test() #0 {
; GCN-LABEL: test:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    s_mov_b32 s16, s33
; GCN-NEXT:    s_mov_b32 s33, s32
; GCN-NEXT:    s_xor_saveexec_b64 s[18:19], -1
; GCN-NEXT:    buffer_store_dword v39, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
; GCN-NEXT:    s_mov_b64 exec, -1
; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
; GCN-NEXT:    s_mov_b64 exec, s[18:19]
; GCN-NEXT:    v_writelane_b32 v40, s16, 4
; GCN-NEXT:    v_writelane_b32 v40, s28, 2
; GCN-NEXT:    v_writelane_b32 v40, s29, 3
; GCN-NEXT:    v_writelane_b32 v40, s30, 0
; GCN-NEXT:    ; implicit-def: $vgpr39 : SGPR spill to VGPR lane
; GCN-NEXT:    s_addk_i32 s32, 0x400
; GCN-NEXT:    v_writelane_b32 v40, s31, 1
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ; def s16
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    v_writelane_b32 v39, s16, 0
; GCN-NEXT:    s_or_saveexec_b64 s[28:29], -1
; GCN-NEXT:    buffer_store_dword v39, off, s[0:3], s33 ; 4-byte Folded Spill
; GCN-NEXT:    s_mov_b64 exec, s[28:29]
; GCN-NEXT:    s_getpc_b64 s[16:17]
; GCN-NEXT:    s_add_u32 s16, s16, ext_func@gotpcrel32@lo+4
; GCN-NEXT:    s_addc_u32 s17, s17, ext_func@gotpcrel32@hi+12
; GCN-NEXT:    s_load_dwordx2 s[16:17], s[16:17], 0x0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_swappc_b64 s[30:31], s[16:17]
; GCN-NEXT:    s_or_saveexec_b64 s[28:29], -1
; GCN-NEXT:    buffer_load_dword v39, off, s[0:3], s33 ; 4-byte Folded Reload
; GCN-NEXT:    s_mov_b64 exec, s[28:29]
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_readlane_b32 s4, v39, 0
; GCN-NEXT:    v_mov_b32_e32 v0, s4
; GCN-NEXT:    global_store_dword v[0:1], v0, off
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_readlane_b32 s31, v40, 1
; GCN-NEXT:    v_readlane_b32 s30, v40, 0
; GCN-NEXT:    s_mov_b32 s32, s33
; GCN-NEXT:    v_readlane_b32 s4, v40, 4
; GCN-NEXT:    v_readlane_b32 s28, v40, 2
; GCN-NEXT:    v_readlane_b32 s29, v40, 3
; GCN-NEXT:    s_xor_saveexec_b64 s[6:7], -1
; GCN-NEXT:    buffer_load_dword v39, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
; GCN-NEXT:    s_mov_b64 exec, -1
; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
; GCN-NEXT:    s_mov_b64 exec, s[6:7]
; GCN-NEXT:    s_mov_b32 s33, s4
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_setpc_b64 s[30:31]
;
; GCN-O0-LABEL: test:
; GCN-O0:       ; %bb.0:
; GCN-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-O0-NEXT:    s_mov_b32 s16, s33
; GCN-O0-NEXT:    s_mov_b32 s33, s32
; GCN-O0-NEXT:    s_xor_saveexec_b64 s[18:19], -1
; GCN-O0-NEXT:    buffer_store_dword v39, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
; GCN-O0-NEXT:    s_mov_b64 exec, -1
; GCN-O0-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
; GCN-O0-NEXT:    s_mov_b64 exec, s[18:19]
; GCN-O0-NEXT:    v_writelane_b32 v40, s16, 4
; GCN-O0-NEXT:    v_writelane_b32 v40, s28, 2
; GCN-O0-NEXT:    v_writelane_b32 v40, s29, 3
; GCN-O0-NEXT:    s_add_i32 s32, s32, 0x400
; GCN-O0-NEXT:    v_writelane_b32 v40, s30, 0
; GCN-O0-NEXT:    v_writelane_b32 v40, s31, 1
; GCN-O0-NEXT:    ;;#ASMSTART
; GCN-O0-NEXT:    ; def s16
; GCN-O0-NEXT:    ;;#ASMEND
; GCN-O0-NEXT:    ; implicit-def: $vgpr39 : SGPR spill to VGPR lane
; GCN-O0-NEXT:    v_writelane_b32 v39, s16, 0
; GCN-O0-NEXT:    s_or_saveexec_b64 s[28:29], -1
; GCN-O0-NEXT:    buffer_store_dword v39, off, s[0:3], s33 ; 4-byte Folded Spill
; GCN-O0-NEXT:    s_mov_b64 exec, s[28:29]
; GCN-O0-NEXT:    s_getpc_b64 s[16:17]
; GCN-O0-NEXT:    s_add_u32 s16, s16, ext_func@gotpcrel32@lo+4
; GCN-O0-NEXT:    s_addc_u32 s17, s17, ext_func@gotpcrel32@hi+12
; GCN-O0-NEXT:    s_load_dwordx2 s[16:17], s[16:17], 0x0
; GCN-O0-NEXT:    s_mov_b64 s[22:23], s[2:3]
; GCN-O0-NEXT:    s_mov_b64 s[20:21], s[0:1]
; GCN-O0-NEXT:    s_mov_b64 s[0:1], s[20:21]
; GCN-O0-NEXT:    s_mov_b64 s[2:3], s[22:23]
; GCN-O0-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
; GCN-O0-NEXT:    s_or_saveexec_b64 s[28:29], -1
; GCN-O0-NEXT:    buffer_load_dword v39, off, s[0:3], s33 ; 4-byte Folded Reload
; GCN-O0-NEXT:    s_mov_b64 exec, s[28:29]
; GCN-O0-NEXT:    s_waitcnt vmcnt(0)
; GCN-O0-NEXT:    v_readlane_b32 s4, v39, 0
; GCN-O0-NEXT:    ; implicit-def: $sgpr6_sgpr7
; GCN-O0-NEXT:    v_mov_b32_e32 v0, s6
; GCN-O0-NEXT:    v_mov_b32_e32 v1, s7
; GCN-O0-NEXT:    v_mov_b32_e32 v2, s4
; GCN-O0-NEXT:    global_store_dword v[0:1], v2, off
; GCN-O0-NEXT:    s_waitcnt vmcnt(0)
; GCN-O0-NEXT:    v_readlane_b32 s31, v40, 1
; GCN-O0-NEXT:    v_readlane_b32 s30, v40, 0
; GCN-O0-NEXT:    s_mov_b32 s32, s33
; GCN-O0-NEXT:    v_readlane_b32 s4, v40, 4
; GCN-O0-NEXT:    v_readlane_b32 s28, v40, 2
; GCN-O0-NEXT:    v_readlane_b32 s29, v40, 3
; GCN-O0-NEXT:    s_xor_saveexec_b64 s[6:7], -1
; GCN-O0-NEXT:    buffer_load_dword v39, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
; GCN-O0-NEXT:    s_mov_b64 exec, -1
; GCN-O0-NEXT:    buffer_load_dword v40, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
; GCN-O0-NEXT:    s_mov_b64 exec, s[6:7]
; GCN-O0-NEXT:    s_mov_b32 s33, s4
; GCN-O0-NEXT:    s_waitcnt vmcnt(0)
; GCN-O0-NEXT:    s_setpc_b64 s[30:31]
  ; Inline asm with an "=s" constraint defines an SGPR that must survive the
  ; call to @ext_func (see the file header comment for the spilling scheme).
  %sgpr = call i32 asm sideeffect "; def $0", "=s" () #0
  call void @ext_func()
  ; Volatile store keeps %sgpr live across the call so its spill/reload
  ; cannot be optimized away.
  store volatile i32 %sgpr, ptr addrspace(1) undef
  ret void
}

declare void @ext_func();

; Register budgets are capped (41 VGPRs / 34 SGPRs) so that, per the header
; comment, VGPR40 is the only CSR VGPR available for the SGPR spill lanes.
attributes #0 = { nounwind "amdgpu-num-vgpr"="41" "amdgpu-num-sgpr"="34"}
