; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S --riscv-v-register-bit-width-lmul=1 | FileCheck %s -check-prefix=LMUL1
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S --riscv-v-register-bit-width-lmul=2 | FileCheck %s -check-prefix=LMUL2
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S --riscv-v-register-bit-width-lmul=4 | FileCheck %s -check-prefix=LMUL4
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S --riscv-v-register-bit-width-lmul=8 | FileCheck %s -check-prefix=LMUL8
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=LMUL2
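;
; The --riscv-v-register-bit-width-lmul option sets the LMUL assumed when the
; RISC-V cost model reports its vector register bit width, so the loop
; vectorizer chooses a VF of <vscale x LMUL x i64> for this i64 loop. The
; last RUN invocation omits the option and reuses the LMUL2 prefixes,
; checking that the default LMUL is 2.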

define void @load_store(ptr %p) {
; LMUL1-LABEL: @load_store(
; LMUL1-NEXT:  entry:
; LMUL1-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; LMUL1-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP0]]
; LMUL1-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; LMUL1:       vector.ph:
; LMUL1-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; LMUL1-NEXT:    [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]]
; LMUL1-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; LMUL1-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; LMUL1-NEXT:    br label [[VECTOR_BODY:%.*]]
; LMUL1:       vector.body:
; LMUL1-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; LMUL1-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 0
; LMUL1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[TMP2]]
; LMUL1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i32 0
; LMUL1-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 1 x i64>, ptr [[TMP4]], align 8
; LMUL1-NEXT:    [[TMP5:%.*]] = add <vscale x 1 x i64> [[WIDE_LOAD]], splat (i64 1)
; LMUL1-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[TMP4]], align 8
; LMUL1-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; LMUL1-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; LMUL1-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL1:       middle.block:
; LMUL1-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; LMUL1-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; LMUL1:       scalar.ph:
; LMUL1-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; LMUL1-NEXT:    br label [[FOR_BODY:%.*]]
; LMUL1:       for.body:
; LMUL1-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; LMUL1-NEXT:    [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
; LMUL1-NEXT:    [[V:%.*]] = load i64, ptr [[Q]], align 8
; LMUL1-NEXT:    [[W:%.*]] = add i64 [[V]], 1
; LMUL1-NEXT:    store i64 [[W]], ptr [[Q]], align 8
; LMUL1-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; LMUL1-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
; LMUL1-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; LMUL1:       for.end:
; LMUL1-NEXT:    ret void
;
; LMUL2-LABEL: @load_store(
; LMUL2-NEXT:  entry:
; LMUL2-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; LMUL2-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 2
; LMUL2-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
; LMUL2-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; LMUL2:       vector.ph:
; LMUL2-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; LMUL2-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 2
; LMUL2-NEXT:    [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; LMUL2-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; LMUL2-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; LMUL2-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 2
; LMUL2-NEXT:    br label [[VECTOR_BODY:%.*]]
; LMUL2:       vector.body:
; LMUL2-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; LMUL2-NEXT:    [[TMP4:%.*]] = add i64 [[INDEX]], 0
; LMUL2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[TMP4]]
; LMUL2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i32 0
; LMUL2-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
; LMUL2-NEXT:    [[TMP7:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 1)
; LMUL2-NEXT:    store <vscale x 2 x i64> [[TMP7]], ptr [[TMP6]], align 8
; LMUL2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
; LMUL2-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; LMUL2-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL2:       middle.block:
; LMUL2-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; LMUL2-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; LMUL2:       scalar.ph:
; LMUL2-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; LMUL2-NEXT:    br label [[FOR_BODY:%.*]]
; LMUL2:       for.body:
; LMUL2-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; LMUL2-NEXT:    [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
; LMUL2-NEXT:    [[V:%.*]] = load i64, ptr [[Q]], align 8
; LMUL2-NEXT:    [[W:%.*]] = add i64 [[V]], 1
; LMUL2-NEXT:    store i64 [[W]], ptr [[Q]], align 8
; LMUL2-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; LMUL2-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
; LMUL2-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; LMUL2:       for.end:
; LMUL2-NEXT:    ret void
;
; LMUL4-LABEL: @load_store(
; LMUL4-NEXT:  entry:
; LMUL4-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; LMUL4-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; LMUL4-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
; LMUL4-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; LMUL4:       vector.ph:
; LMUL4-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; LMUL4-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 4
; LMUL4-NEXT:    [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; LMUL4-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; LMUL4-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; LMUL4-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 4
; LMUL4-NEXT:    br label [[VECTOR_BODY:%.*]]
; LMUL4:       vector.body:
; LMUL4-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; LMUL4-NEXT:    [[TMP4:%.*]] = add i64 [[INDEX]], 0
; LMUL4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[TMP4]]
; LMUL4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i32 0
; LMUL4-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 4 x i64>, ptr [[TMP6]], align 8
; LMUL4-NEXT:    [[TMP7:%.*]] = add <vscale x 4 x i64> [[WIDE_LOAD]], splat (i64 1)
; LMUL4-NEXT:    store <vscale x 4 x i64> [[TMP7]], ptr [[TMP6]], align 8
; LMUL4-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
; LMUL4-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; LMUL4-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL4:       middle.block:
; LMUL4-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; LMUL4-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; LMUL4:       scalar.ph:
; LMUL4-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; LMUL4-NEXT:    br label [[FOR_BODY:%.*]]
; LMUL4:       for.body:
; LMUL4-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; LMUL4-NEXT:    [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
; LMUL4-NEXT:    [[V:%.*]] = load i64, ptr [[Q]], align 8
; LMUL4-NEXT:    [[W:%.*]] = add i64 [[V]], 1
; LMUL4-NEXT:    store i64 [[W]], ptr [[Q]], align 8
; LMUL4-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; LMUL4-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
; LMUL4-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; LMUL4:       for.end:
; LMUL4-NEXT:    ret void
;
; LMUL8-LABEL: @load_store(
; LMUL8-NEXT:  entry:
; LMUL8-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; LMUL8-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 8
; LMUL8-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
; LMUL8-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; LMUL8:       vector.ph:
; LMUL8-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; LMUL8-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 8
; LMUL8-NEXT:    [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; LMUL8-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; LMUL8-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; LMUL8-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 8
; LMUL8-NEXT:    br label [[VECTOR_BODY:%.*]]
; LMUL8:       vector.body:
; LMUL8-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; LMUL8-NEXT:    [[TMP4:%.*]] = add i64 [[INDEX]], 0
; LMUL8-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[TMP4]]
; LMUL8-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i32 0
; LMUL8-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP6]], align 8
; LMUL8-NEXT:    [[TMP7:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], splat (i64 1)
; LMUL8-NEXT:    store <vscale x 8 x i64> [[TMP7]], ptr [[TMP6]], align 8
; LMUL8-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
; LMUL8-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; LMUL8-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL8:       middle.block:
; LMUL8-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; LMUL8-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; LMUL8:       scalar.ph:
; LMUL8-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; LMUL8-NEXT:    br label [[FOR_BODY:%.*]]
; LMUL8:       for.body:
; LMUL8-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; LMUL8-NEXT:    [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
; LMUL8-NEXT:    [[V:%.*]] = load i64, ptr [[Q]], align 8
; LMUL8-NEXT:    [[W:%.*]] = add i64 [[V]], 1
; LMUL8-NEXT:    store i64 [[W]], ptr [[Q]], align 8
; LMUL8-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; LMUL8-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
; LMUL8-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; LMUL8:       for.end:
; LMUL8-NEXT:    ret void
;
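; The input below is a simple loop that increments each of 1024 i64 elements
; in place; the autogenerated check blocks above should differ between the
; four configurations chiefly in the <vscale x N x i64> widths and the
; matching vscale multiplications.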
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %q = getelementptr inbounds i64, ptr %p, i64 %iv
  %v = load i64, ptr %q
  %w = add i64 %v, 1
  store i64 %w, ptr %q
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1024
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret void
}