; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK

; Load/add/store a <3 x i8>: checks that a non-power-of-2 fixed vector of
; 3 bytes selects VL=3 with e8/mf4 rather than widening to 4 elements.
define void @vls3i8(ptr align 8 %array) {
; CHECK-LABEL: vls3i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 3, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  ; %arr is a zero-index GEP and is unused; kept to preserve the original test input.
  %arr = getelementptr inbounds <3 x i8>, ptr %array, i64 0
  %1 = load <3 x i8>, ptr %array, align 1
  %2 = add <3 x i8> %1, %1
  store <3 x i8> %2, ptr %array, align 1
  ret void
}
; Load/add/store a <3 x i32>: non-power-of-2 count of 3 words should use
; VL=3 with e32/m1 (3 x i32 fits one register group at VLEN>=128).
define void @vls3(ptr align 8 %array) {
; CHECK-LABEL: vls3:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  ; %arr is a zero-index GEP and is unused; kept to preserve the original test input.
  %arr = getelementptr inbounds <3 x i32>, ptr %array, i64 0
  %1 = load <3 x i32>, ptr %array, align 4
  %2 = add <3 x i32> %1, %1
  store <3 x i32> %2, ptr %array, align 4
  ret void
}
; Load/add/store a <5 x i32>: 5 words exceed one m1 group, so VL=5 with
; e32/m2 is expected.
define void @vls5(ptr align 8 %array) {
; CHECK-LABEL: vls5:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 5, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  ; %arr is a zero-index GEP and is unused; kept to preserve the original test input.
  %arr = getelementptr inbounds <5 x i32>, ptr %array, i64 0
  %1 = load <5 x i32>, ptr %array, align 4
  %2 = add <5 x i32> %1, %1
  store <5 x i32> %2, ptr %array, align 4
  ret void
}
; Load/add/store a <6 x i32>: expects VL=6 with e32/m2.
define void @vls6(ptr align 8 %array) {
; CHECK-LABEL: vls6:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 6, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  ; %arr is a zero-index GEP and is unused; kept to preserve the original test input.
  %arr = getelementptr inbounds <6 x i32>, ptr %array, i64 0
  %1 = load <6 x i32>, ptr %array, align 4
  %2 = add <6 x i32> %1, %1
  store <6 x i32> %2, ptr %array, align 4
  ret void
}
; Load/add/store a <7 x i32>: expects VL=7 with e32/m2.
define void @vls7(ptr align 8 %array) {
; CHECK-LABEL: vls7:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 7, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  ; %arr is a zero-index GEP and is unused; kept to preserve the original test input.
  %arr = getelementptr inbounds <7 x i32>, ptr %array, i64 0
  %1 = load <7 x i32>, ptr %array, align 4
  %2 = add <7 x i32> %1, %1
  store <7 x i32> %2, ptr %array, align 4
  ret void
}
; Load/add/store a <9 x i32>: 9 words exceed an m2 group, so VL=9 with
; e32/m4 is expected.
define void @vls9(ptr align 8 %array) {
; CHECK-LABEL: vls9:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 9, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  ; %arr is a zero-index GEP and is unused; kept to preserve the original test input.
  %arr = getelementptr inbounds <9 x i32>, ptr %array, i64 0
  %1 = load <9 x i32>, ptr %array, align 4
  %2 = add <9 x i32> %1, %1
  store <9 x i32> %2, ptr %array, align 4
  ret void
}
; Load/add/store a <10 x i32>: expects VL=10 with e32/m4.
define void @vls10(ptr align 8 %array) {
; CHECK-LABEL: vls10:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 10, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  ; %arr is a zero-index GEP and is unused; kept to preserve the original test input.
  %arr = getelementptr inbounds <10 x i32>, ptr %array, i64 0
  %1 = load <10 x i32>, ptr %array, align 4
  %2 = add <10 x i32> %1, %1
  store <10 x i32> %2, ptr %array, align 4
  ret void
}
; Load/add/store an <11 x i32>: expects VL=11 with e32/m4.
define void @vls11(ptr align 8 %array) {
; CHECK-LABEL: vls11:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 11, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  ; %arr is a zero-index GEP and is unused; kept to preserve the original test input.
  %arr = getelementptr inbounds <11 x i32>, ptr %array, i64 0
  %1 = load <11 x i32>, ptr %array, align 4
  %2 = add <11 x i32> %1, %1
  store <11 x i32> %2, ptr %array, align 4
  ret void
}