; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s -passes=loop-vectorize,dce,instcombine -S | FileCheck -check-prefix=GFX9 %s
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s -passes=loop-vectorize,dce,instcombine -S | FileCheck -check-prefix=VI %s
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii < %s -passes=loop-vectorize,dce,instcombine -S | FileCheck -check-prefix=CI %s
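; Check that the fast-math half reduction is vectorized to <2 x half> and
; interleaved by 2 on gfx900 (GFX9) and fiji (VI), while on hawaii (CI) the
; loop is left scalar.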

define half @vectorize_v2f16_loop(half addrspace(1)* noalias %s) {
; GFX9-LABEL: @vectorize_v2f16_loop(
; GFX9-NEXT:  entry:
; GFX9-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; GFX9:       vector.ph:
; GFX9-NEXT:    br label [[VECTOR_BODY:%.*]]
; GFX9:       vector.body:
; GFX9-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; GFX9-NEXT:    [[VEC_PHI:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; GFX9-NEXT:    [[VEC_PHI1:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; GFX9-NEXT:    [[TMP0:%.*]] = getelementptr inbounds half, half addrspace(1)* [[S:%.*]], i64 [[INDEX]]
; GFX9-NEXT:    [[TMP1:%.*]] = bitcast half addrspace(1)* [[TMP0]] to <2 x half> addrspace(1)*
; GFX9-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP1]], align 2
; GFX9-NEXT:    [[TMP2:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 2
; GFX9-NEXT:    [[TMP3:%.*]] = bitcast half addrspace(1)* [[TMP2]] to <2 x half> addrspace(1)*
; GFX9-NEXT:    [[WIDE_LOAD2:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP3]], align 2
; GFX9-NEXT:    [[TMP4]] = fadd fast <2 x half> [[VEC_PHI]], [[WIDE_LOAD]]
; GFX9-NEXT:    [[TMP5]] = fadd fast <2 x half> [[VEC_PHI1]], [[WIDE_LOAD2]]
; GFX9-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; GFX9-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; GFX9-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; GFX9:       middle.block:
; GFX9-NEXT:    [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP5]], [[TMP4]]
; GFX9-NEXT:    [[TMP7:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH8000, <2 x half> [[BIN_RDX]])
; GFX9-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; GFX9:       scalar.ph:
; GFX9-NEXT:    br label [[FOR_BODY:%.*]]
; GFX9:       for.body:
; GFX9-NEXT:    br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; GFX9:       for.end:
; GFX9-NEXT:    [[ADD_LCSSA:%.*]] = phi half [ poison, [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; GFX9-NEXT:    ret half [[ADD_LCSSA]]
;
; VI-LABEL: @vectorize_v2f16_loop(
; VI-NEXT:  entry:
; VI-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; VI:       vector.ph:
; VI-NEXT:    br label [[VECTOR_BODY:%.*]]
; VI:       vector.body:
; VI-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VI-NEXT:    [[VEC_PHI:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; VI-NEXT:    [[VEC_PHI1:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; VI-NEXT:    [[TMP0:%.*]] = getelementptr inbounds half, half addrspace(1)* [[S:%.*]], i64 [[INDEX]]
; VI-NEXT:    [[TMP1:%.*]] = bitcast half addrspace(1)* [[TMP0]] to <2 x half> addrspace(1)*
; VI-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP1]], align 2
; VI-NEXT:    [[TMP2:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 2
; VI-NEXT:    [[TMP3:%.*]] = bitcast half addrspace(1)* [[TMP2]] to <2 x half> addrspace(1)*
; VI-NEXT:    [[WIDE_LOAD2:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP3]], align 2
; VI-NEXT:    [[TMP4]] = fadd fast <2 x half> [[VEC_PHI]], [[WIDE_LOAD]]
; VI-NEXT:    [[TMP5]] = fadd fast <2 x half> [[VEC_PHI1]], [[WIDE_LOAD2]]
; VI-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VI-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; VI-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VI:       middle.block:
; VI-NEXT:    [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP5]], [[TMP4]]
; VI-NEXT:    [[TMP7:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH8000, <2 x half> [[BIN_RDX]])
; VI-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; VI:       scalar.ph:
; VI-NEXT:    br label [[FOR_BODY:%.*]]
; VI:       for.body:
; VI-NEXT:    br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; VI:       for.end:
; VI-NEXT:    [[ADD_LCSSA:%.*]] = phi half [ poison, [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; VI-NEXT:    ret half [[ADD_LCSSA]]
;
; CI-LABEL: @vectorize_v2f16_loop(
; CI-NEXT:  entry:
; CI-NEXT:    br label [[FOR_BODY:%.*]]
; CI:       for.body:
; CI-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CI-NEXT:    [[Q_04:%.*]] = phi half [ 0xH0000, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CI-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds half, half addrspace(1)* [[S:%.*]], i64 [[INDVARS_IV]]
; CI-NEXT:    [[TMP0:%.*]] = load half, half addrspace(1)* [[ARRAYIDX]], align 2
; CI-NEXT:    [[ADD]] = fadd fast half [[Q_04]], [[TMP0]]
; CI-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CI-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 256
; CI-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CI:       for.end:
; CI-NEXT:    ret half [[ADD]]
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %q.04 = phi half [ 0.0, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds half, half addrspace(1)* %s, i64 %indvars.iv
  %0 = load half, half addrspace(1)* %arrayidx, align 2
  %add = fadd fast half %q.04, %0
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  %add.lcssa = phi half [ %add, %for.body ]
  ret half %add.lcssa
}