; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -S -mtriple=riscv64 -passes=loop-unroll -mcpu=sifive-s76 | FileCheck %s
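; Check that for -mcpu=sifive-s76 the loop unroller partially unrolls the
; constant trip count (64) saxpy loop below by a factor of 16: the unrolled
; body contains 16 copies of the load/fmul/fadd/store sequence, and since
; 64 is a multiple of 16 no remainder loop is emitted.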

define dso_local void @saxpy(float %a, ptr %x, ptr %y) {
; CHECK-LABEL: @saxpy(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT_15:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[TMP0]], [[A:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[Y:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = fadd fast float [[MUL]], [[TMP1]]
; CHECK-NEXT:    store float [[ADD]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT:    [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT:    [[MUL_1:%.*]] = fmul fast float [[TMP2]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT:    [[ADD_1:%.*]] = fadd fast float [[MUL_1]], [[TMP3]]
; CHECK-NEXT:    store float [[ADD_1]], ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_1:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-NEXT:    [[TMP4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT:    [[MUL_2:%.*]] = fmul fast float [[TMP4]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = fadd fast float [[MUL_2]], [[TMP5]]
; CHECK-NEXT:    store float [[ADD_2]], ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_2:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 3
; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-NEXT:    [[TMP6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
; CHECK-NEXT:    [[MUL_3:%.*]] = fmul fast float [[TMP6]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-NEXT:    [[TMP7:%.*]] = load float, ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT:    [[ADD_3:%.*]] = fadd fast float [[MUL_3]], [[TMP7]]
; CHECK-NEXT:    store float [[ADD_3]], ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_3:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 4
; CHECK-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_3]]
; CHECK-NEXT:    [[TMP8:%.*]] = load float, ptr [[ARRAYIDX_4]], align 4
; CHECK-NEXT:    [[MUL_4:%.*]] = fmul fast float [[TMP8]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_4:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_3]]
; CHECK-NEXT:    [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2_4]], align 4
; CHECK-NEXT:    [[ADD_4:%.*]] = fadd fast float [[MUL_4]], [[TMP9]]
; CHECK-NEXT:    store float [[ADD_4]], ptr [[ARRAYIDX2_4]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_4:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 5
; CHECK-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_4]]
; CHECK-NEXT:    [[TMP10:%.*]] = load float, ptr [[ARRAYIDX_5]], align 4
; CHECK-NEXT:    [[MUL_5:%.*]] = fmul fast float [[TMP10]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_5:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_4]]
; CHECK-NEXT:    [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2_5]], align 4
; CHECK-NEXT:    [[ADD_5:%.*]] = fadd fast float [[MUL_5]], [[TMP11]]
; CHECK-NEXT:    store float [[ADD_5]], ptr [[ARRAYIDX2_5]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_5:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 6
; CHECK-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_5]]
; CHECK-NEXT:    [[TMP12:%.*]] = load float, ptr [[ARRAYIDX_6]], align 4
; CHECK-NEXT:    [[MUL_6:%.*]] = fmul fast float [[TMP12]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_6:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_5]]
; CHECK-NEXT:    [[TMP13:%.*]] = load float, ptr [[ARRAYIDX2_6]], align 4
; CHECK-NEXT:    [[ADD_6:%.*]] = fadd fast float [[MUL_6]], [[TMP13]]
; CHECK-NEXT:    store float [[ADD_6]], ptr [[ARRAYIDX2_6]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_6:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 7
; CHECK-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_6]]
; CHECK-NEXT:    [[TMP14:%.*]] = load float, ptr [[ARRAYIDX_7]], align 4
; CHECK-NEXT:    [[MUL_7:%.*]] = fmul fast float [[TMP14]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_7:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_6]]
; CHECK-NEXT:    [[TMP15:%.*]] = load float, ptr [[ARRAYIDX2_7]], align 4
; CHECK-NEXT:    [[ADD_7:%.*]] = fadd fast float [[MUL_7]], [[TMP15]]
; CHECK-NEXT:    store float [[ADD_7]], ptr [[ARRAYIDX2_7]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_7:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 8
; CHECK-NEXT:    [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_7]]
; CHECK-NEXT:    [[TMP16:%.*]] = load float, ptr [[ARRAYIDX_8]], align 4
; CHECK-NEXT:    [[MUL_8:%.*]] = fmul fast float [[TMP16]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_8:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_7]]
; CHECK-NEXT:    [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2_8]], align 4
; CHECK-NEXT:    [[ADD_8:%.*]] = fadd fast float [[MUL_8]], [[TMP17]]
; CHECK-NEXT:    store float [[ADD_8]], ptr [[ARRAYIDX2_8]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_8:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 9
; CHECK-NEXT:    [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_8]]
; CHECK-NEXT:    [[TMP18:%.*]] = load float, ptr [[ARRAYIDX_9]], align 4
; CHECK-NEXT:    [[MUL_9:%.*]] = fmul fast float [[TMP18]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_9:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_8]]
; CHECK-NEXT:    [[TMP19:%.*]] = load float, ptr [[ARRAYIDX2_9]], align 4
; CHECK-NEXT:    [[ADD_9:%.*]] = fadd fast float [[MUL_9]], [[TMP19]]
; CHECK-NEXT:    store float [[ADD_9]], ptr [[ARRAYIDX2_9]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_9:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 10
; CHECK-NEXT:    [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_9]]
; CHECK-NEXT:    [[TMP20:%.*]] = load float, ptr [[ARRAYIDX_10]], align 4
; CHECK-NEXT:    [[MUL_10:%.*]] = fmul fast float [[TMP20]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_10:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_9]]
; CHECK-NEXT:    [[TMP21:%.*]] = load float, ptr [[ARRAYIDX2_10]], align 4
; CHECK-NEXT:    [[ADD_10:%.*]] = fadd fast float [[MUL_10]], [[TMP21]]
; CHECK-NEXT:    store float [[ADD_10]], ptr [[ARRAYIDX2_10]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_10:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 11
; CHECK-NEXT:    [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_10]]
; CHECK-NEXT:    [[TMP22:%.*]] = load float, ptr [[ARRAYIDX_11]], align 4
; CHECK-NEXT:    [[MUL_11:%.*]] = fmul fast float [[TMP22]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_11:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_10]]
; CHECK-NEXT:    [[TMP23:%.*]] = load float, ptr [[ARRAYIDX2_11]], align 4
; CHECK-NEXT:    [[ADD_11:%.*]] = fadd fast float [[MUL_11]], [[TMP23]]
; CHECK-NEXT:    store float [[ADD_11]], ptr [[ARRAYIDX2_11]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_11:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 12
; CHECK-NEXT:    [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_11]]
; CHECK-NEXT:    [[TMP24:%.*]] = load float, ptr [[ARRAYIDX_12]], align 4
; CHECK-NEXT:    [[MUL_12:%.*]] = fmul fast float [[TMP24]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_12:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_11]]
; CHECK-NEXT:    [[TMP25:%.*]] = load float, ptr [[ARRAYIDX2_12]], align 4
; CHECK-NEXT:    [[ADD_12:%.*]] = fadd fast float [[MUL_12]], [[TMP25]]
; CHECK-NEXT:    store float [[ADD_12]], ptr [[ARRAYIDX2_12]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_12:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 13
; CHECK-NEXT:    [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_12]]
; CHECK-NEXT:    [[TMP26:%.*]] = load float, ptr [[ARRAYIDX_13]], align 4
; CHECK-NEXT:    [[MUL_13:%.*]] = fmul fast float [[TMP26]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_13:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_12]]
; CHECK-NEXT:    [[TMP27:%.*]] = load float, ptr [[ARRAYIDX2_13]], align 4
; CHECK-NEXT:    [[ADD_13:%.*]] = fadd fast float [[MUL_13]], [[TMP27]]
; CHECK-NEXT:    store float [[ADD_13]], ptr [[ARRAYIDX2_13]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_13:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 14
; CHECK-NEXT:    [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_13]]
; CHECK-NEXT:    [[TMP28:%.*]] = load float, ptr [[ARRAYIDX_14]], align 4
; CHECK-NEXT:    [[MUL_14:%.*]] = fmul fast float [[TMP28]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_14:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_13]]
; CHECK-NEXT:    [[TMP29:%.*]] = load float, ptr [[ARRAYIDX2_14]], align 4
; CHECK-NEXT:    [[ADD_14:%.*]] = fadd fast float [[MUL_14]], [[TMP29]]
; CHECK-NEXT:    store float [[ADD_14]], ptr [[ARRAYIDX2_14]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_14:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 15
; CHECK-NEXT:    [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_14]]
; CHECK-NEXT:    [[TMP30:%.*]] = load float, ptr [[ARRAYIDX_15]], align 4
; CHECK-NEXT:    [[MUL_15:%.*]] = fmul fast float [[TMP30]], [[A]]
; CHECK-NEXT:    [[ARRAYIDX2_15:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_14]]
; CHECK-NEXT:    [[TMP31:%.*]] = load float, ptr [[ARRAYIDX2_15]], align 4
; CHECK-NEXT:    [[ADD_15:%.*]] = fadd fast float [[MUL_15]], [[TMP31]]
; CHECK-NEXT:    store float [[ADD_15]], ptr [[ARRAYIDX2_15]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_15]] = add nuw nsw i64 [[INDVARS_IV]], 16
; CHECK-NEXT:    [[EXITCOND_NOT_15:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_15]], 64
; CHECK-NEXT:    br i1 [[EXITCOND_NOT_15]], label [[EXIT_LOOP:%.*]], label [[FOR_BODY]]
; CHECK:       exit_loop:
; CHECK-NEXT:    ret void
;
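; Original scalar loop: y[i] += a * x[i] for i in [0, 64).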
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %x, i64 %indvars.iv
  %0 = load float, ptr %arrayidx, align 4
  %mul = fmul fast float %0, %a
  %arrayidx2 = getelementptr inbounds float, ptr %y, i64 %indvars.iv
  %1 = load float, ptr %arrayidx2, align 4
  %add = fadd fast float %mul, %1
  store float %add, ptr %arrayidx2, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, 64
  br i1 %exitcond.not, label %exit_loop, label %for.body

exit_loop:
  ret void
}