; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -O0 -verify-machineinstrs %s -o - | FileCheck %s

; The forced spill to preserve the scratch VGPR requires the voffset to hold the large offset
; value in the MUBUF instruction being emitted before s_cbranch_scc1 as it clobbers the SCC.

define amdgpu_kernel void @test_kernel(i32 %val) #0 {
; CHECK-LABEL: test_kernel:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s33, 0
; CHECK-NEXT:    s_mov_b32 s32, 0x180000
; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s12, s17
; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s13, 0
; CHECK-NEXT:    s_add_u32 s0, s0, s17
; CHECK-NEXT:    s_addc_u32 s1, s1, 0
; CHECK-NEXT:    ; implicit-def: $vgpr40 : SGPR spill to VGPR lane
; CHECK-NEXT:    v_writelane_b32 v40, s16, 0
; CHECK-NEXT:    s_mov_b32 s13, s15
; CHECK-NEXT:    s_mov_b32 s12, s14
; CHECK-NEXT:    v_readlane_b32 s14, v40, 0
; CHECK-NEXT:    s_mov_b64 s[16:17], s[8:9]
; CHECK-NEXT:    s_load_dword s8, s[16:17], 0x0
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    v_writelane_b32 v40, s8, 1
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; def vgpr10
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    s_add_i32 s8, s33, 0x100100
; CHECK-NEXT:    buffer_store_dword v10, off, s[0:3], s8 ; 4-byte Folded Spill
; CHECK-NEXT:    s_mov_b64 s[18:19], 8
; CHECK-NEXT:    s_mov_b32 s8, s16
; CHECK-NEXT:    s_mov_b32 s9, s17
; CHECK-NEXT:    s_mov_b32 s16, s18
; CHECK-NEXT:    s_mov_b32 s15, s19
; CHECK-NEXT:    s_add_u32 s8, s8, s16
; CHECK-NEXT:    s_addc_u32 s15, s9, s15
; CHECK-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9
; CHECK-NEXT:    s_mov_b32 s9, s15
; CHECK-NEXT:    s_mov_b32 s15, 0x2000
; CHECK-NEXT:    s_mov_b32 s18, s15
; CHECK-NEXT:    s_getpc_b64 s[16:17]
; CHECK-NEXT:    s_add_u32 s16, s16, device_func@gotpcrel32@lo+4
; CHECK-NEXT:    s_addc_u32 s17, s17, device_func@gotpcrel32@hi+12
; CHECK-NEXT:    s_load_dwordx2 s[16:17], s[16:17], 0x0
; CHECK-NEXT:    s_mov_b64 s[22:23], s[2:3]
; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT:    s_mov_b32 s15, 20
; CHECK-NEXT:    v_lshlrev_b32_e64 v2, s15, v2
; CHECK-NEXT:    s_mov_b32 s15, 10
; CHECK-NEXT:    v_lshlrev_b32_e64 v1, s15, v1
; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
; CHECK-NEXT:    ; implicit-def: $sgpr15
; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
; CHECK-NEXT:    v_mov_b32_e32 v0, s18
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT:    s_add_i32 s4, s33, 0x100100
; CHECK-NEXT:    buffer_load_dword v10, off, s[0:3], s4 ; 4-byte Folded Reload
; CHECK-NEXT:    v_readlane_b32 s4, v40, 1
; CHECK-NEXT:    s_mov_b32 s5, 0
; CHECK-NEXT:    s_cmp_eq_u32 s4, s5
; CHECK-NEXT:    v_mov_b32_e32 v0, 0x4000
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    buffer_store_dword v10, v0, s[0:3], s33 offen ; 4-byte Folded Spill
; CHECK-NEXT:    s_cbranch_scc1 .LBB0_2
; CHECK-NEXT:  ; %bb.1: ; %store
; CHECK-NEXT:    s_add_i32 s4, s33, 0x100000
; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s4 ; 4-byte Folded Reload
; CHECK-NEXT:    ; implicit-def: $sgpr4
; CHECK-NEXT:    v_mov_b32_e32 v0, s4
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    ds_write_b32 v0, v1
; CHECK-NEXT:    s_endpgm
; CHECK-NEXT:  .LBB0_2: ; %end
; CHECK-NEXT:    s_endpgm
  %arr = alloca < 1339 x i32>, align 8192, addrspace(5)
  %cmp = icmp ne i32 %val, 0
  %vreg = call i32 asm sideeffect "; def vgpr10", "={v10}"()
  call void @device_func(ptr addrspace(5) %arr)
  br i1 %cmp, label %store, label %end

store:
  store volatile i32 %vreg, ptr addrspace(3) undef
  ret void

end:
  ret void
}

declare void @device_func(ptr addrspace(5))

attributes #0 = { nounwind "frame-pointer"="all" }

!llvm.module.flags = !{!0}
!0 = !{i32 1, !"amdhsa_code_object_version", i32 500}