; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GFX6 %s
; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX12 %s

; Tests that a chain of eight scalar constant loads is vectorized into a single wider load.
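; On GFX6 the eight loads should fold into a single s_load_dwordx8 and on GFX12
; into a single s_load_b256; the r600 (EG) path splits them into two 128-bit
; VTX_READ_128 fetches.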
define amdgpu_kernel void @constant_load_v8f32(ptr addrspace(4) noalias nocapture readonly %weights, ptr addrspace(1) noalias nocapture %out_ptr) {
; GFX6-LABEL: constant_load_v8f32:
; GFX6:       ; %bb.0: ; %entry
; GFX6-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x9
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_load_dword s16, s[10:11], 0x0
; GFX6-NEXT:    s_load_dwordx8 s[0:7], s[8:9], 0x0
; GFX6-NEXT:    s_mov_b32 s15, 0xf000
; GFX6-NEXT:    s_mov_b32 s14, -1
; GFX6-NEXT:    s_mov_b32 s12, s10
; GFX6-NEXT:    s_mov_b32 s13, s11
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_mov_b32_e32 v0, s16
; GFX6-NEXT:    v_add_f32_e32 v0, s0, v0
; GFX6-NEXT:    v_add_f32_e32 v0, s1, v0
; GFX6-NEXT:    v_add_f32_e32 v0, s2, v0
; GFX6-NEXT:    v_add_f32_e32 v0, s3, v0
; GFX6-NEXT:    v_add_f32_e32 v0, s4, v0
; GFX6-NEXT:    v_add_f32_e32 v0, s5, v0
; GFX6-NEXT:    v_add_f32_e32 v0, s6, v0
; GFX6-NEXT:    v_add_f32_e32 v0, s7, v0
; GFX6-NEXT:    buffer_store_dword v0, off, s[12:15], 0
; GFX6-NEXT:    s_endpgm
;
; EG-LABEL: constant_load_v8f32:
; EG:       ; %bb.0: ; %entry
; EG-NEXT:    ALU 1, @12, KC0[CB0:0-32], KC1[]
; EG-NEXT:    TEX 2 @6
; EG-NEXT:    ALU 9, @14, KC0[CB0:0-32], KC1[]
; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT:    CF_END
; EG-NEXT:    PAD
; EG-NEXT:    Fetch clause starting at 6:
; EG-NEXT:     VTX_READ_128 T2.XYZW, T1.X, 0, #1
; EG-NEXT:     VTX_READ_32 T0.X, T0.X, 0, #1
; EG-NEXT:     VTX_READ_128 T1.XYZW, T1.X, 16, #1
; EG-NEXT:    ALU clause starting at 12:
; EG-NEXT:     MOV T0.X, KC0[2].Z,
; EG-NEXT:     MOV * T1.X, KC0[2].Y,
; EG-NEXT:    ALU clause starting at 14:
; EG-NEXT:     ADD * T0.W, T2.X, T0.X,
; EG-NEXT:     ADD * T0.W, T2.Y, PV.W,
; EG-NEXT:     ADD * T0.W, T2.Z, PV.W,
; EG-NEXT:     ADD * T0.W, T2.W, PV.W,
; EG-NEXT:     ADD * T0.W, T1.X, PV.W,
; EG-NEXT:     ADD * T0.W, T1.Y, PV.W,
; EG-NEXT:     ADD * T0.W, T1.Z, PV.W,
; EG-NEXT:     ADD T0.X, T1.W, PV.W,
; EG-NEXT:     LSHR * T1.X, KC0[2].Z, literal.x,
; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
;
; GFX12-LABEL: constant_load_v8f32:
; GFX12:       ; %bb.0: ; %entry
; GFX12-NEXT:    s_load_b128 s[8:11], s[4:5], 0x24
; GFX12-NEXT:    s_wait_kmcnt 0x0
; GFX12-NEXT:    s_load_b32 s12, s[10:11], 0x0
; GFX12-NEXT:    s_load_b256 s[0:7], s[8:9], 0x0
; GFX12-NEXT:    s_wait_kmcnt 0x0
; GFX12-NEXT:    s_add_f32 s0, s0, s12
; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3)
; GFX12-NEXT:    s_add_f32 s0, s1, s0
; GFX12-NEXT:    s_add_f32 s0, s2, s0
; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3)
; GFX12-NEXT:    s_add_f32 s0, s3, s0
; GFX12-NEXT:    s_add_f32 s0, s4, s0
; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3)
; GFX12-NEXT:    s_add_f32 s0, s5, s0
; GFX12-NEXT:    s_add_f32 s0, s6, s0
; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3)
; GFX12-NEXT:    s_add_f32 s0, s7, s0
; GFX12-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0
; GFX12-NEXT:    global_store_b32 v0, v1, s[10:11]
; GFX12-NEXT:    s_endpgm
entry:
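  ; Eight float loads at consecutive offsets from %weights, accumulated through
  ; a chain of fadds; the vectorizer should merge the loads into one wide load.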
  %out_ptr.promoted = load float, ptr addrspace(1) %out_ptr, align 4
  %tmp = load float, ptr addrspace(4) %weights, align 4
  %add = fadd float %tmp, %out_ptr.promoted
  %arrayidx.1 = getelementptr inbounds float, ptr addrspace(4) %weights, i64 1
  %tmp1 = load float, ptr addrspace(4) %arrayidx.1, align 4
  %add.1 = fadd float %tmp1, %add
  %arrayidx.2 = getelementptr inbounds float, ptr addrspace(4) %weights, i64 2
  %tmp2 = load float, ptr addrspace(4) %arrayidx.2, align 4
  %add.2 = fadd float %tmp2, %add.1
  %arrayidx.3 = getelementptr inbounds float, ptr addrspace(4) %weights, i64 3
  %tmp3 = load float, ptr addrspace(4) %arrayidx.3, align 4
  %add.3 = fadd float %tmp3, %add.2
  %arrayidx.4 = getelementptr inbounds float, ptr addrspace(4) %weights, i64 4
  %tmp4 = load float, ptr addrspace(4) %arrayidx.4, align 4
  %add.4 = fadd float %tmp4, %add.3
  %arrayidx.5 = getelementptr inbounds float, ptr addrspace(4) %weights, i64 5
  %tmp5 = load float, ptr addrspace(4) %arrayidx.5, align 4
  %add.5 = fadd float %tmp5, %add.4
  %arrayidx.6 = getelementptr inbounds float, ptr addrspace(4) %weights, i64 6
  %tmp6 = load float, ptr addrspace(4) %arrayidx.6, align 4
  %add.6 = fadd float %tmp6, %add.5
  %arrayidx.7 = getelementptr inbounds float, ptr addrspace(4) %weights, i64 7
  %tmp7 = load float, ptr addrspace(4) %arrayidx.7, align 4
  %add.7 = fadd float %tmp7, %add.6
  store float %add.7, ptr addrspace(1) %out_ptr, align 4
  ret void
}