; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck %s

@V1 = protected local_unnamed_addr addrspace(1) global i32 0, align 4
@V2 = protected local_unnamed_addr addrspace(1) global i32 0, align 4
@Q = internal addrspace(3) global i8 poison, align 16

; Test spill placement of the VGPR reload in %bb.194 relative to the SGPR
; reload used for the exec mask. The buffer_load_dword should come after
; the s_or_b64 exec.
define amdgpu_kernel void @__omp_offloading_16_dd2df_main_l9() {
; CHECK-LABEL: __omp_offloading_16_dd2df_main_l9:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_add_u32 s0, s0, s15
; CHECK-NEXT:    s_addc_u32 s1, s1, 0
; CHECK-NEXT:    v_mov_b32_e32 v1, v0
; CHECK-NEXT:    v_mov_b32_e32 v0, 0
; CHECK-NEXT:    global_load_ushort v2, v0, s[4:5] offset:4
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    buffer_store_dword v2, off, s[0:3], 0 offset:4 ; 4-byte Folded Spill
; CHECK-NEXT:    ; implicit-def: $sgpr4
; CHECK-NEXT:    s_mov_b32 s4, 0
; CHECK-NEXT:    v_cmp_eq_u32_e64 s[6:7], v1, s4
; CHECK-NEXT:    v_mov_b32_e32 v1, 0
; CHECK-NEXT:    ds_write_b8 v0, v1
; CHECK-NEXT:    s_mov_b64 s[4:5], exec
; CHECK-NEXT:    ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
; CHECK-NEXT:    v_writelane_b32 v3, s4, 0
; CHECK-NEXT:    v_writelane_b32 v3, s5, 1
; CHECK-NEXT:    s_or_saveexec_b64 s[8:9], -1
; CHECK-NEXT:    buffer_store_dword v3, off, s[0:3], 0 ; 4-byte Folded Spill
; CHECK-NEXT:    s_mov_b64 exec, s[8:9]
; CHECK-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
; CHECK-NEXT:    s_cbranch_execz .LBB0_2
; CHECK-NEXT:  ; %bb.1: ; %bb193
; CHECK-NEXT:  .LBB0_2: ; %bb194
; CHECK-NEXT:    s_or_saveexec_b64 s[8:9], -1
; CHECK-NEXT:    buffer_load_dword v3, off, s[0:3], 0 ; 4-byte Folded Reload
; CHECK-NEXT:    s_mov_b64 exec, s[8:9]
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    v_readlane_b32 s4, v3, 0
; CHECK-NEXT:    v_readlane_b32 s5, v3, 1
; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], 0 offset:4 ; 4-byte Folded Reload
; CHECK-NEXT:    s_mov_b32 s4, 0xffff
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    v_and_b32_e64 v0, s4, v0
; CHECK-NEXT:    s_mov_b32 s4, 0
; CHECK-NEXT:    v_cmp_ne_u32_e64 s[4:5], v0, s4
; CHECK-NEXT:    s_and_b64 vcc, exec, s[4:5]
; CHECK-NEXT:    s_cbranch_vccnz .LBB0_4
; CHECK-NEXT:  ; %bb.3: ; %bb201
; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], 0 offset:4 ; 4-byte Folded Reload
; CHECK-NEXT:    s_getpc_b64 s[4:5]
; CHECK-NEXT:    s_add_u32 s4, s4, V2@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s5, s5, V2@rel32@hi+12
; CHECK-NEXT:    v_mov_b32_e32 v0, 0
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    global_store_short v0, v1, s[4:5]
; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_barrier
; CHECK-NEXT:    s_trap 2
; CHECK-NEXT:    ; divergent unreachable
; CHECK-NEXT:  .LBB0_4: ; %UnifiedReturnBlock
; CHECK-NEXT:    s_endpgm
bb:
  %i10 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %i13 = tail call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
  %i14 = getelementptr i8, ptr addrspace(4) %i13, i64 4
  %i15 = load i16, ptr addrspace(4) %i14, align 4
  %i22 = icmp eq i32 %i10, 0
  store i8 0, ptr addrspace(3) @Q
  br i1 %i22, label %bb193, label %bb194

bb193:                                            ; preds = %bb
  br label %bb194

bb194:                                            ; preds = %bb193, %bb
  %i196 = icmp eq i16 %i15, 0
  br i1 %i196, label %bb201, label %bb202

bb201:                                            ; preds = %bb194
  store i16 %i15, ptr addrspace(1) @V2
  call void @llvm.amdgcn.s.barrier()
  tail call void @llvm.trap()
  unreachable

bb202:                                            ; preds = %bb194
  ret void
}

declare hidden void @__keep_alive()
declare i32 @llvm.amdgcn.workitem.id.x()
declare align 4 ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
declare void @llvm.assume(i1 noundef)
declare void @llvm.amdgcn.s.barrier()
declare void @llvm.trap()

!llvm.module.flags = !{!0}
!0 = !{i32 1, !"amdhsa_code_object_version", i32 500}