; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK

; Load/add/store of a <3 x i8> checks lowering of a non-power-of-2 fixed
; vector with a sub-byte-multiple element count (expects VL=3, e8/mf4).
define void @vls3i8(ptr align 8 %array) {
; CHECK-LABEL: vls3i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 3, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <3 x i8>, ptr %array, i64 0
  %1 = load <3 x i8>, ptr %array, align 1
  %2 = add <3 x i8> %1, %1
  store <3 x i8> %2, ptr %array, align 1
  ret void
}

; Load/add/store of a <3 x i32> — non-power-of-2 element count that still
; fits in a single vector register group (expects VL=3, e32/m1).
define void @vls3(ptr align 8 %array) {
; CHECK-LABEL: vls3:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <3 x i32>, ptr %array, i64 0
  %1 = load <3 x i32>, ptr %array, align 4
  %2 = add <3 x i32> %1, %1
  store <3 x i32> %2, ptr %array, align 4
  ret void
}

; Load/add/store of a <5 x i32> — non-power-of-2 count requiring a wider
; register group (expects VL=5, e32/m2).
define void @vls5(ptr align 8 %array) {
; CHECK-LABEL: vls5:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 5, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <5 x i32>, ptr %array, i64 0
  %1 = load <5 x i32>, ptr %array, align 4
  %2 = add <5 x i32> %1, %1
  store <5 x i32> %2, ptr %array, align 4
  ret void
}

; Load/add/store of a <6 x i32> — even but non-power-of-2 element count
; (expects VL=6, e32/m2).
define void @vls6(ptr align 8 %array) {
; CHECK-LABEL: vls6:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 6, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <6 x i32>, ptr %array, i64 0
  %1 = load <6 x i32>, ptr %array, align 4
  %2 = add <6 x i32> %1, %1
  store <6 x i32> %2, ptr %array, align 4
  ret void
}

; Load/add/store of a <7 x i32> — one short of the next power of 2
; (expects VL=7, e32/m2).
define void @vls7(ptr align 8 %array) {
; CHECK-LABEL: vls7:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 7, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <7 x i32>, ptr %array, i64 0
  %1 = load <7 x i32>, ptr %array, align 4
  %2 = add <7 x i32> %1, %1
  store <7 x i32> %2, ptr %array, align 4
  ret void
}


; Load/add/store of a <9 x i32> — one past a power of 2, forcing the next
; register-group size up (expects VL=9, e32/m4).
define void @vls9(ptr align 8 %array) {
; CHECK-LABEL: vls9:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 9, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <9 x i32>, ptr %array, i64 0
  %1 = load <9 x i32>, ptr %array, align 4
  %2 = add <9 x i32> %1, %1
  store <9 x i32> %2, ptr %array, align 4
  ret void
}


; Load/add/store of a <10 x i32> — non-power-of-2 count in the m4 group
; (expects VL=10, e32/m4).
define void @vls10(ptr align 8 %array) {
; CHECK-LABEL: vls10:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 10, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <10 x i32>, ptr %array, i64 0
  %1 = load <10 x i32>, ptr %array, align 4
  %2 = add <10 x i32> %1, %1
  store <10 x i32> %2, ptr %array, align 4
  ret void
}

; Load/add/store of a <11 x i32> — odd non-power-of-2 count in the m4 group
; (expects VL=11, e32/m4).
define void @vls11(ptr align 8 %array) {
; CHECK-LABEL: vls11:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 11, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %arr = getelementptr inbounds <11 x i32>, ptr %array, i64 0
  %1 = load <11 x i32>, ptr %array, align 4
  %2 = add <11 x i32> %1, %1
  store <11 x i32> %2, ptr %array, align 4
  ret void
}