xref: /llvm-project/llvm/test/CodeGen/AMDGPU/fadd.ll (revision 9e9907f1cfa424366fba58d9520f9305b537cec9)
; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC

; FUNC-LABEL: {{^}}fadd_f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
; SI: v_add_f32
; Scalar f32 add of two kernel arguments, stored to the output buffer.
; (Indentation normalized to the file's 2-space convention.)
define amdgpu_kernel void @fadd_f32(ptr addrspace(1) %out, float %a, float %b) #0 {
  %add = fadd float %a, %b
  store float %add, ptr addrspace(1) %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}fadd_v2f32:
; R600-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
; R600-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
; SI: v_add_f32
; SI: v_add_f32
; Two-element vector f32 add; each lane lowers to an independent scalar add.
define amdgpu_kernel void @fadd_v2f32(ptr addrspace(1) %out, <2 x float> %a, <2 x float> %b) #0 {
  %sum = fadd <2 x float> %a, %b
  store <2 x float> %sum, ptr addrspace(1) %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}fadd_v4f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; Four-element vector f32 add with both operands loaded from memory;
; the right-hand operand lives one <4 x float> past the left-hand one.
define amdgpu_kernel void @fadd_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
  %rhs.ptr = getelementptr <4 x float>, ptr addrspace(1) %in, i32 1
  %lhs = load <4 x float>, ptr addrspace(1) %in, align 16
  %rhs = load <4 x float>, ptr addrspace(1) %rhs.ptr, align 16
  %sum = fadd <4 x float> %lhs, %rhs
  store <4 x float> %sum, ptr addrspace(1) %out, align 16
  ret void
}

; FUNC-LABEL: {{^}}fadd_v8f32:
; R600: ADD
; R600: ADD
; R600: ADD
; R600: ADD
; R600: ADD
; R600: ADD
; R600: ADD
; R600: ADD
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
; Eight-element vector f32 add passed by value; lowers to eight scalar adds.
define amdgpu_kernel void @fadd_v8f32(ptr addrspace(1) %out, <8 x float> %a, <8 x float> %b) #0 {
  %sum = fadd <8 x float> %a, %b
  store <8 x float> %sum, ptr addrspace(1) %out, align 32
  ret void
}

; FUNC-LABEL: {{^}}fadd_0_nsz_attr_f32:
; SI-NOT: v_add_f32
; Adding +0.0 under nsz (reinforced by the "no-signed-zeros-fp-math"
; attribute, #1) is a no-op, so the add should fold away entirely.
; (Indentation normalized to the file's 2-space convention.)
define amdgpu_kernel void @fadd_0_nsz_attr_f32(ptr addrspace(1) %out, float %a) #1 {
  %add = fadd nsz float %a, 0.0
  store float %add, ptr addrspace(1) %out, align 4
  ret void
}
; Function attribute groups referenced by the kernels above.
attributes #0 = { nounwind }
attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" }