xref: /llvm-project/llvm/test/CodeGen/AMDGPU/swdev380865.ll (revision fe8335babba1725e18d6ea94073c3dbb92958bfa)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -stress-regalloc=4 -o - %s | FileCheck %s
3
4; Make sure we can rematerialize split 64-bit constants (which
5; MachineLICM hoisted out of the loop) and avoid spilling inside the
6; loop.
7;
8; MachineLICM originally believed the constant materializations to be
9; rematerializable, but the lowered REG_SEQUENCE uses they coalesce
10; into were not. The InlineSpiller also did not recognize redundant
11; spills inside the loop, so we would repeatedly reload the same
12; values.
13
14define amdgpu_kernel void @_Z6kernelILi4000ELi1EEvPd(ptr addrspace(1) %x.coerce) {
15; CHECK-LABEL: _Z6kernelILi4000ELi1EEvPd:
16; CHECK:       ; %bb.0: ; %entry
17; CHECK-NEXT:    s_mov_b64 s[0:1], 0
18; CHECK-NEXT:    s_load_dword s2, s[0:1], 0x0
19; CHECK-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
20; CHECK-NEXT:    s_mov_b32 s4, 0
21; CHECK-NEXT:    s_mov_b32 s0, 0
22; CHECK-NEXT:    s_mov_b32 s5, 0x40280000
23; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
24; CHECK-NEXT:    s_mov_b32 s1, s2
25; CHECK-NEXT:    s_mov_b32 s2, 0
26; CHECK-NEXT:    v_mov_b32_e32 v0, s6
27; CHECK-NEXT:    s_mov_b32 s3, 0x40260000
28; CHECK-NEXT:    v_mov_b32_e32 v1, s7
29; CHECK-NEXT:  .LBB0_1: ; %for.cond4.preheader
30; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
31; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], 0
32; CHECK-NEXT:    s_mov_b32 s6, 0
33; CHECK-NEXT:    s_mov_b32 s7, 0x40140000
34; CHECK-NEXT:    s_add_i32 s0, s0, s1
35; CHECK-NEXT:    s_cmpk_lt_i32 s0, 0xa00
36; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], s[6:7]
37; CHECK-NEXT:    s_mov_b32 s6, 0
38; CHECK-NEXT:    s_mov_b32 s7, 0x40180000
39; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], s[6:7]
40; CHECK-NEXT:    s_mov_b32 s6, 0
41; CHECK-NEXT:    s_mov_b32 s7, 0x401c0000
42; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], s[6:7]
43; CHECK-NEXT:    s_mov_b32 s6, 0
44; CHECK-NEXT:    s_mov_b32 s7, 0x40220000
45; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], s[6:7]
46; CHECK-NEXT:    s_mov_b32 s6, 0
47; CHECK-NEXT:    s_mov_b32 s7, 0x40240000
48; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], s[6:7]
49; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], s[2:3]
50; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], s[4:5]
51; CHECK-NEXT:    s_cbranch_scc1 .LBB0_1
52; CHECK-NEXT:  ; %bb.2: ; %for.cond.cleanup.loopexit
53; CHECK-NEXT:    v_mov_b32_e32 v2, 0
54; CHECK-NEXT:    v_mov_b32_e32 v3, 0
55; CHECK-NEXT:    global_store_dwordx2 v[2:3], v[0:1], off
56; CHECK-NEXT:    s_endpgm
; Reduced reproducer: %x.coerce is unused and the loads/stores go through
; null pointers, which is fine for a codegen-only (llc) test. The loop adds
; a chain of distinct double constants; in the expected output above, each
; one is rebuilt inside the loop as an s_mov_b32 pair (split 64-bit SGPR
; materialization) instead of being spilled/reloaded.
57entry:
  ; Loop stride comes from memory so it is not a compile-time constant and
  ; the backward branch on the induction variable survives into the output.
58  %0 = load i32, ptr addrspace(4) null, align 4
  ; Always-true guard (0 < 2560), kept so the loop has a separate preheader
  ; for MachineLICM to hoist the constant materializations out of.
59  %cmp6 = icmp slt i32 0, 2560
60  br i1 %cmp6, label %for.cond4.preheader, label %for.cond.cleanup
61
62for.cond4.preheader:                              ; preds = %for.cond4.preheader, %entry
63  %idx.07 = phi i32 [ %add13, %for.cond4.preheader ], [ 0, %entry ]
  ; Reload the accumulator every iteration, then fold in eight distinct
  ; double immediates; each non-inlinable one needs a 64-bit SGPR pair,
  ; creating the register pressure that -stress-regalloc=4 turns into
  ; remat-vs-spill decisions.
64  %arrayidx.promoted = load double, ptr addrspace(1) null, align 8
65  %add9 = fadd contract double %arrayidx.promoted, 0.000000e+00
66  %add9.1 = fadd contract double %add9, 5.000000e+00
67  %add9.2 = fadd contract double %add9.1, 6.000000e+00
68  %add9.3 = fadd contract double %add9.2, 7.000000e+00
69  %add9.4 = fadd contract double %add9.3, 9.000000e+00
70  %add9.5 = fadd contract double %add9.4, 1.000000e+01
71  %add9.6 = fadd contract double %add9.5, 1.100000e+01
72  %add9.7 = fadd contract double %add9.6, 1.200000e+01
73  store double %add9.7, ptr addrspace(1) null, align 8
  ; idx += stride; loop while idx < 2560 (0xa00 in the expected output).
74  %add13 = add i32 %idx.07, %0
75  %cmp = icmp slt i32 %add13, 2560
76  br i1 %cmp, label %for.cond4.preheader, label %for.cond.cleanup
77
78for.cond.cleanup:                                 ; preds = %for.cond4.preheader, %entry
79  ret void
80}
81
; Intrinsic declarations left over from the pre-reduction kernel; none are
; referenced by the function above, but they are harmless to keep.
82declare i32 @llvm.amdgcn.workitem.id.x() #0
83declare i32 @llvm.amdgcn.workgroup.id.x() #0
84declare align 4 ptr addrspace(4) @llvm.amdgcn.dispatch.ptr() #0
85
86attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
87