Lines Matching full:m0

11 ; GCN-NEXT: s_mov_b32 m0, 0
13 ; GCN-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
27 ; TOVGPR: s_mov_b32 m0, [[M0_RESTORE]]
32 ; TOVMEM: s_mov_b32 m0, [[M0_RESTORE]]
34 ; GCN: s_add_i32 s{{[0-9]+}}, m0, 1
37 %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
46 %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{m0}"(i32 %m0) #0
53 ; m0 is killed, so it isn't necessary to preserve it during the entry-block spill
56 ; GCN-NOT: v_readlane_b32 m0
57 ; GCN-NOT: s_buffer_store_dword m0
58 ; GCN-NOT: s_buffer_load_dword m0
59 define amdgpu_ps void @spill_kill_m0_lds(ptr addrspace(4) inreg %arg, ptr addrspace(4) inreg %arg1, ptr addrspace(4) inreg %arg2, i32 inreg %m0) #0 {
61 %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
71 %interp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
81 ; Force save and restore of m0 during SMEM spill
85 ; GCN: ; def m0, 1
87 ; GCN: s_mov_b32 m0, [[REG0]]
90 ; GCN: ; clobber m0
92 ; TOSMEM: s_mov_b32 s2, m0
93 ; TOSMEM: s_add_u32 m0, s3, 0x100
94 ; TOSMEM-NEXT: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
95 ; TOSMEM: s_mov_b32 m0, s2
102 ; TOSMEM: s_add_u32 m0, s3, 0x100
103 ; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload
105 ; GCN-NOT: v_readlane_b32 m0
106 ; GCN-NOT: s_buffer_store_dword m0
107 ; GCN-NOT: s_buffer_load_dword m0
108 define amdgpu_kernel void @m0_unavailable_spill(i32 %m0.arg) #0 {
110 %m0 = call i32 asm sideeffect "; def $0, 1", "={m0}"() #0
111 %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0.arg)
112 call void asm sideeffect "; clobber $0", "~{m0}"() #0
129 ; FIXME: RegScavenger::isRegUsed() always returns true if m0 is reserved, so we have to save and restore it
130 ; FIXME-TOSMEM-NOT: m0
131 ; TOSMEM: s_add_u32 m0, s3, {{0x[0-9]+}}
132 ; TOSMEM: s_buffer_store_dword s1, s[88:91], m0 ; 4-byte Folded Spill
133 ; FIXME-TOSMEM-NOT: m0
135 ; TOSMEM: s_add_u32 m0, s3, {{0x[0-9]+}}
137 ; TOSMEM: s_buffer_store_dwordx2 [[REG]], s[88:91], m0 ; 8-byte Folded Spill
138 ; FIXME-TOSMEM-NOT: m0
142 ; TOSMEM: s_mov_b32 m0, -1
144 ; TOSMEM: s_mov_b32 s2, m0
145 ; TOSMEM: s_add_u32 m0, s3, 0x200
146 ; TOSMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[88:91], m0 ; 8-byte Folded Reload
147 ; TOSMEM: s_mov_b32 m0, s2
152 ; FIXME-TOSMEM-NOT: m0
153 ; TOSMEM: s_add_u32 m0, s3, 0x100
154 ; TOSMEM: s_buffer_load_dword s2, s[88:91], m0 ; 4-byte Folded Reload
155 ; FIXME-TOSMEM-NOT: m0
157 ; TOSMEM: s_mov_b32 [[REG1:s[0-9]+]], m0
158 ; TOSMEM: s_add_u32 m0, s3, 0x100
159 ; TOSMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[88:91], m0 ; 8-byte Folded Reload
160 ; TOSMEM: s_mov_b32 m0, [[REG1]]
161 ; TOSMEM: s_mov_b32 m0, -1
164 ; TOSMEM-NOT: m0
165 ; TOSMEM: s_mov_b32 m0, s2
166 ; TOSMEM: ; use m0
171 %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
178 call void asm sideeffect "; use $0", "{m0}"(i32 %m0) #0