// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvfh -target-feature +zvfbfmin \
// RUN:   -O0 -emit-llvm %s -o - | FileCheck %s
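//
// Note: +zvfh and +zvfbfmin are enabled in addition to +v so that the
// vfloat16* and vbfloat16* typedefs exercised below are usable.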

#include <riscv_vector.h>

// This test case tests the typedefs defined in riscv_vector.h.
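//
// Each typedef follows the scheme v<int|uint|float|bfloat><SEW><LMUL>[x<NF>]_t.
// Under LLVM's RISC-V convention of vscale = VLEN/64, an LMUL=1 type with
// SEW-bit elements lowers to <vscale x (64/SEW) x iSEW> (vint8m1_t becomes
// <vscale x 8 x i8>, vint32m1_t becomes <vscale x 2 x i32>), and fractional or
// grouped LMULs scale that element count down or up accordingly.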

// CHECK-LABEL: define dso_local void @_Z3foov
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[B64:%.*]] = alloca <vscale x 1 x i1>, align 1
// CHECK-NEXT:    [[B32:%.*]] = alloca <vscale x 2 x i1>, align 1
// CHECK-NEXT:    [[B16:%.*]] = alloca <vscale x 4 x i1>, align 1
// CHECK-NEXT:    [[B8:%.*]] = alloca <vscale x 8 x i1>, align 1
// CHECK-NEXT:    [[I8MF8:%.*]] = alloca <vscale x 1 x i8>, align 1
// CHECK-NEXT:    [[I8MF4:%.*]] = alloca <vscale x 2 x i8>, align 1
// CHECK-NEXT:    [[I8MF2:%.*]] = alloca <vscale x 4 x i8>, align 1
// CHECK-NEXT:    [[I8M1:%.*]] = alloca <vscale x 8 x i8>, align 1
// CHECK-NEXT:    [[I8M2:%.*]] = alloca <vscale x 16 x i8>, align 1
// CHECK-NEXT:    [[I8M4:%.*]] = alloca <vscale x 32 x i8>, align 1
// CHECK-NEXT:    [[I8M8:%.*]] = alloca <vscale x 64 x i8>, align 1
// CHECK-NEXT:    [[U8MF8:%.*]] = alloca <vscale x 1 x i8>, align 1
// CHECK-NEXT:    [[U8MF4:%.*]] = alloca <vscale x 2 x i8>, align 1
// CHECK-NEXT:    [[U8MF2:%.*]] = alloca <vscale x 4 x i8>, align 1
// CHECK-NEXT:    [[U8M1:%.*]] = alloca <vscale x 8 x i8>, align 1
// CHECK-NEXT:    [[U8M2:%.*]] = alloca <vscale x 16 x i8>, align 1
// CHECK-NEXT:    [[U8M4:%.*]] = alloca <vscale x 32 x i8>, align 1
// CHECK-NEXT:    [[U8M8:%.*]] = alloca <vscale x 64 x i8>, align 1
// CHECK-NEXT:    [[I16MF4:%.*]] = alloca <vscale x 1 x i16>, align 2
// CHECK-NEXT:    [[I16MF2:%.*]] = alloca <vscale x 2 x i16>, align 2
// CHECK-NEXT:    [[I16M1:%.*]] = alloca <vscale x 4 x i16>, align 2
// CHECK-NEXT:    [[I16M2:%.*]] = alloca <vscale x 8 x i16>, align 2
// CHECK-NEXT:    [[I16M4:%.*]] = alloca <vscale x 16 x i16>, align 2
// CHECK-NEXT:    [[I16M8:%.*]] = alloca <vscale x 32 x i16>, align 2
// CHECK-NEXT:    [[U16MF4:%.*]] = alloca <vscale x 1 x i16>, align 2
// CHECK-NEXT:    [[U16MF2:%.*]] = alloca <vscale x 2 x i16>, align 2
// CHECK-NEXT:    [[U16M1:%.*]] = alloca <vscale x 4 x i16>, align 2
// CHECK-NEXT:    [[U16M2:%.*]] = alloca <vscale x 8 x i16>, align 2
// CHECK-NEXT:    [[U16M4:%.*]] = alloca <vscale x 16 x i16>, align 2
// CHECK-NEXT:    [[U16M8:%.*]] = alloca <vscale x 32 x i16>, align 2
// CHECK-NEXT:    [[I32MF2:%.*]] = alloca <vscale x 1 x i32>, align 4
// CHECK-NEXT:    [[I32M1:%.*]] = alloca <vscale x 2 x i32>, align 4
// CHECK-NEXT:    [[I32M2:%.*]] = alloca <vscale x 4 x i32>, align 4
// CHECK-NEXT:    [[I32M4:%.*]] = alloca <vscale x 8 x i32>, align 4
// CHECK-NEXT:    [[I32M8:%.*]] = alloca <vscale x 16 x i32>, align 4
// CHECK-NEXT:    [[U32MF2:%.*]] = alloca <vscale x 1 x i32>, align 4
// CHECK-NEXT:    [[U32M1:%.*]] = alloca <vscale x 2 x i32>, align 4
// CHECK-NEXT:    [[U32M2:%.*]] = alloca <vscale x 4 x i32>, align 4
// CHECK-NEXT:    [[U32M4:%.*]] = alloca <vscale x 8 x i32>, align 4
// CHECK-NEXT:    [[U32M8:%.*]] = alloca <vscale x 16 x i32>, align 4
// CHECK-NEXT:    [[I64M1:%.*]] = alloca <vscale x 1 x i64>, align 8
// CHECK-NEXT:    [[I64M2:%.*]] = alloca <vscale x 2 x i64>, align 8
// CHECK-NEXT:    [[I64M4:%.*]] = alloca <vscale x 4 x i64>, align 8
// CHECK-NEXT:    [[I64M8:%.*]] = alloca <vscale x 8 x i64>, align 8
// CHECK-NEXT:    [[U64M1:%.*]] = alloca <vscale x 1 x i64>, align 8
// CHECK-NEXT:    [[U64M2:%.*]] = alloca <vscale x 2 x i64>, align 8
// CHECK-NEXT:    [[U64M4:%.*]] = alloca <vscale x 4 x i64>, align 8
// CHECK-NEXT:    [[U64M8:%.*]] = alloca <vscale x 8 x i64>, align 8
// CHECK-NEXT:    [[F16MF4:%.*]] = alloca <vscale x 1 x half>, align 2
// CHECK-NEXT:    [[F16MF2:%.*]] = alloca <vscale x 2 x half>, align 2
// CHECK-NEXT:    [[F16M1:%.*]] = alloca <vscale x 4 x half>, align 2
// CHECK-NEXT:    [[F16M2:%.*]] = alloca <vscale x 8 x half>, align 2
// CHECK-NEXT:    [[F16M4:%.*]] = alloca <vscale x 16 x half>, align 2
// CHECK-NEXT:    [[F16M8:%.*]] = alloca <vscale x 32 x half>, align 2
// CHECK-NEXT:    [[BF16MF4:%.*]] = alloca <vscale x 1 x bfloat>, align 2
// CHECK-NEXT:    [[BF16MF2:%.*]] = alloca <vscale x 2 x bfloat>, align 2
// CHECK-NEXT:    [[BF16M1:%.*]] = alloca <vscale x 4 x bfloat>, align 2
// CHECK-NEXT:    [[BF16M2:%.*]] = alloca <vscale x 8 x bfloat>, align 2
// CHECK-NEXT:    [[BF16M4:%.*]] = alloca <vscale x 16 x bfloat>, align 2
// CHECK-NEXT:    [[BF16M8:%.*]] = alloca <vscale x 32 x bfloat>, align 2
// CHECK-NEXT:    [[F32MF2:%.*]] = alloca <vscale x 1 x float>, align 4
// CHECK-NEXT:    [[F32M1:%.*]] = alloca <vscale x 2 x float>, align 4
// CHECK-NEXT:    [[F32M2:%.*]] = alloca <vscale x 4 x float>, align 4
// CHECK-NEXT:    [[F32M4:%.*]] = alloca <vscale x 8 x float>, align 4
// CHECK-NEXT:    [[F32M8:%.*]] = alloca <vscale x 16 x float>, align 4
// CHECK-NEXT:    [[F64M1:%.*]] = alloca <vscale x 1 x double>, align 8
// CHECK-NEXT:    [[F64M2:%.*]] = alloca <vscale x 2 x double>, align 8
// CHECK-NEXT:    [[F64M4:%.*]] = alloca <vscale x 4 x double>, align 8
// CHECK-NEXT:    [[F64M8:%.*]] = alloca <vscale x 8 x double>, align 8
// CHECK-NEXT:    [[I8MF8X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 2), align 1
// CHECK-NEXT:    [[I8MF8X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 3), align 1
// CHECK-NEXT:    [[I8MF8X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 4), align 1
// CHECK-NEXT:    [[I8MF8X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 5), align 1
// CHECK-NEXT:    [[I8MF8X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 6), align 1
// CHECK-NEXT:    [[I8MF8X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 7), align 1
// CHECK-NEXT:    [[I8MF8X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 8), align 1
// CHECK-NEXT:    [[I8MF4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 2), align 1
// CHECK-NEXT:    [[I8MF4X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 3), align 1
// CHECK-NEXT:    [[I8MF4X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 4), align 1
// CHECK-NEXT:    [[I8MF4X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 5), align 1
// CHECK-NEXT:    [[I8MF4X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 6), align 1
// CHECK-NEXT:    [[I8MF4X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 7), align 1
// CHECK-NEXT:    [[I8MF4X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 8), align 1
// CHECK-NEXT:    [[I8MF2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 2), align 1
// CHECK-NEXT:    [[I8MF2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 3), align 1
// CHECK-NEXT:    [[I8MF2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 4), align 1
// CHECK-NEXT:    [[I8MF2X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 5), align 1
// CHECK-NEXT:    [[I8MF2X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 6), align 1
// CHECK-NEXT:    [[I8MF2X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 7), align 1
// CHECK-NEXT:    [[I8MF2X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 8), align 1
// CHECK-NEXT:    [[I8M1X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 2), align 1
// CHECK-NEXT:    [[I8M1X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 3), align 1
// CHECK-NEXT:    [[I8M1X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 4), align 1
// CHECK-NEXT:    [[I8M1X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 1
// CHECK-NEXT:    [[I8M1X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 6), align 1
// CHECK-NEXT:    [[I8M1X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 7), align 1
// CHECK-NEXT:    [[I8M1X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 8), align 1
// CHECK-NEXT:    [[I8M2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 1
// CHECK-NEXT:    [[I8M2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 3), align 1
// CHECK-NEXT:    [[I8M2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 4), align 1
// CHECK-NEXT:    [[I8M4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 1
// CHECK-NEXT:    [[U8MF8X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 2), align 1
// CHECK-NEXT:    [[U8MF8X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 3), align 1
// CHECK-NEXT:    [[U8MF8X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 4), align 1
// CHECK-NEXT:    [[U8MF8X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 5), align 1
// CHECK-NEXT:    [[U8MF8X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 6), align 1
// CHECK-NEXT:    [[U8MF8X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 7), align 1
// CHECK-NEXT:    [[U8MF8X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 1 x i8>, 8), align 1
// CHECK-NEXT:    [[U8MF4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 2), align 1
// CHECK-NEXT:    [[U8MF4X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 3), align 1
// CHECK-NEXT:    [[U8MF4X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 4), align 1
// CHECK-NEXT:    [[U8MF4X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 5), align 1
// CHECK-NEXT:    [[U8MF4X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 6), align 1
// CHECK-NEXT:    [[U8MF4X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 7), align 1
// CHECK-NEXT:    [[U8MF4X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 8), align 1
// CHECK-NEXT:    [[U8MF2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 2), align 1
// CHECK-NEXT:    [[U8MF2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 3), align 1
// CHECK-NEXT:    [[U8MF2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 4), align 1
// CHECK-NEXT:    [[U8MF2X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 5), align 1
// CHECK-NEXT:    [[U8MF2X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 6), align 1
// CHECK-NEXT:    [[U8MF2X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 7), align 1
// CHECK-NEXT:    [[U8MF2X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 8), align 1
// CHECK-NEXT:    [[U8M1X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 2), align 1
// CHECK-NEXT:    [[U8M1X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 3), align 1
// CHECK-NEXT:    [[U8M1X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 4), align 1
// CHECK-NEXT:    [[U8M1X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 1
// CHECK-NEXT:    [[U8M1X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 6), align 1
// CHECK-NEXT:    [[U8M1X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 7), align 1
// CHECK-NEXT:    [[U8M1X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 8), align 1
// CHECK-NEXT:    [[U8M2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 1
// CHECK-NEXT:    [[U8M2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 3), align 1
// CHECK-NEXT:    [[U8M2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 4), align 1
// CHECK-NEXT:    [[U8M4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 1
// CHECK-NEXT:    [[I16MF4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 2), align 2
// CHECK-NEXT:    [[I16MF4X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 3), align 2
// CHECK-NEXT:    [[I16MF4X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 4), align 2
// CHECK-NEXT:    [[I16MF4X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 5), align 2
// CHECK-NEXT:    [[I16MF4X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 6), align 2
// CHECK-NEXT:    [[I16MF4X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 7), align 2
// CHECK-NEXT:    [[I16MF4X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 8), align 2
// CHECK-NEXT:    [[I16MF2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 2), align 2
// CHECK-NEXT:    [[I16MF2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 3), align 2
// CHECK-NEXT:    [[I16MF2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 4), align 2
// CHECK-NEXT:    [[I16MF2X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 5), align 2
// CHECK-NEXT:    [[I16MF2X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 6), align 2
// CHECK-NEXT:    [[I16MF2X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 7), align 2
// CHECK-NEXT:    [[I16MF2X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 8), align 2
// CHECK-NEXT:    [[I16M1X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 2), align 2
// CHECK-NEXT:    [[I16M1X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 3), align 2
// CHECK-NEXT:    [[I16M1X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 4), align 2
// CHECK-NEXT:    [[I16M1X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 2
// CHECK-NEXT:    [[I16M1X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 6), align 2
// CHECK-NEXT:    [[I16M1X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 7), align 2
// CHECK-NEXT:    [[I16M1X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 8), align 2
// CHECK-NEXT:    [[I16M2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 2
// CHECK-NEXT:    [[I16M2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 3), align 2
// CHECK-NEXT:    [[I16M2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 4), align 2
// CHECK-NEXT:    [[I16M4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 2
// CHECK-NEXT:    [[U16MF4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 2), align 2
// CHECK-NEXT:    [[U16MF4X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 3), align 2
// CHECK-NEXT:    [[U16MF4X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 4), align 2
// CHECK-NEXT:    [[U16MF4X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 5), align 2
// CHECK-NEXT:    [[U16MF4X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 6), align 2
// CHECK-NEXT:    [[U16MF4X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 7), align 2
// CHECK-NEXT:    [[U16MF4X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 8), align 2
// CHECK-NEXT:    [[U16MF2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 2), align 2
// CHECK-NEXT:    [[U16MF2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 3), align 2
// CHECK-NEXT:    [[U16MF2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 4), align 2
// CHECK-NEXT:    [[U16MF2X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 5), align 2
// CHECK-NEXT:    [[U16MF2X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 6), align 2
// CHECK-NEXT:    [[U16MF2X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 7), align 2
// CHECK-NEXT:    [[U16MF2X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 8), align 2
// CHECK-NEXT:    [[U16M1X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 2), align 2
// CHECK-NEXT:    [[U16M1X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 3), align 2
// CHECK-NEXT:    [[U16M1X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 4), align 2
// CHECK-NEXT:    [[U16M1X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 2
// CHECK-NEXT:    [[U16M1X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 6), align 2
// CHECK-NEXT:    [[U16M1X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 7), align 2
// CHECK-NEXT:    [[U16M1X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 8), align 2
// CHECK-NEXT:    [[U16M2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 2
// CHECK-NEXT:    [[U16M2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 3), align 2
// CHECK-NEXT:    [[U16M2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 4), align 2
// CHECK-NEXT:    [[U16M4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 2
// CHECK-NEXT:    [[I32MF2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 2), align 4
// CHECK-NEXT:    [[I32MF2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 3), align 4
// CHECK-NEXT:    [[I32MF2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 4), align 4
// CHECK-NEXT:    [[I32MF2X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 5), align 4
// CHECK-NEXT:    [[I32MF2X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 6), align 4
// CHECK-NEXT:    [[I32MF2X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 7), align 4
// CHECK-NEXT:    [[I32MF2X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 8), align 4
// CHECK-NEXT:    [[I32M1X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 2), align 4
// CHECK-NEXT:    [[I32M1X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 3), align 4
// CHECK-NEXT:    [[I32M1X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 4), align 4
// CHECK-NEXT:    [[I32M1X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 4
// CHECK-NEXT:    [[I32M1X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 6), align 4
// CHECK-NEXT:    [[I32M1X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 7), align 4
// CHECK-NEXT:    [[I32M1X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 8), align 4
// CHECK-NEXT:    [[I32M2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 4
// CHECK-NEXT:    [[I32M2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 3), align 4
// CHECK-NEXT:    [[I32M2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 4), align 4
// CHECK-NEXT:    [[I32M4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 4
// CHECK-NEXT:    [[U32MF2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 2), align 4
// CHECK-NEXT:    [[U32MF2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 3), align 4
// CHECK-NEXT:    [[U32MF2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 4), align 4
// CHECK-NEXT:    [[U32MF2X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 5), align 4
// CHECK-NEXT:    [[U32MF2X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 6), align 4
// CHECK-NEXT:    [[U32MF2X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 7), align 4
// CHECK-NEXT:    [[U32MF2X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 8), align 4
// CHECK-NEXT:    [[U32M1X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 2), align 4
// CHECK-NEXT:    [[U32M1X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 3), align 4
// CHECK-NEXT:    [[U32M1X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 4), align 4
// CHECK-NEXT:    [[U32M1X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 4
// CHECK-NEXT:    [[U32M1X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 6), align 4
// CHECK-NEXT:    [[U32M1X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 7), align 4
// CHECK-NEXT:    [[U32M1X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 8), align 4
// CHECK-NEXT:    [[U32M2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 4
// CHECK-NEXT:    [[U32M2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 3), align 4
// CHECK-NEXT:    [[U32M2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 4), align 4
// CHECK-NEXT:    [[U32M4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 4
// CHECK-NEXT:    [[I64M1X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 2), align 8
// CHECK-NEXT:    [[I64M1X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 3), align 8
// CHECK-NEXT:    [[I64M1X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 4), align 8
// CHECK-NEXT:    [[I64M1X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 8
// CHECK-NEXT:    [[I64M1X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 6), align 8
// CHECK-NEXT:    [[I64M1X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 7), align 8
// CHECK-NEXT:    [[I64M1X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 8), align 8
// CHECK-NEXT:    [[I64M2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 8
// CHECK-NEXT:    [[I64M2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 3), align 8
// CHECK-NEXT:    [[I64M2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 4), align 8
// CHECK-NEXT:    [[I64M4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 8
// CHECK-NEXT:    [[U64M1X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 2), align 8
// CHECK-NEXT:    [[U64M1X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 3), align 8
// CHECK-NEXT:    [[U64M1X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 4), align 8
// CHECK-NEXT:    [[U64M1X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 8
// CHECK-NEXT:    [[U64M1X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 6), align 8
// CHECK-NEXT:    [[U64M1X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 7), align 8
// CHECK-NEXT:    [[U64M1X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 8), align 8
// CHECK-NEXT:    [[U64M2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 8
// CHECK-NEXT:    [[U64M2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 3), align 8
// CHECK-NEXT:    [[U64M2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 4), align 8
// CHECK-NEXT:    [[U64M4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 8
// CHECK-NEXT:    [[F16MF4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 2), align 2
// CHECK-NEXT:    [[F16MF4X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 3), align 2
// CHECK-NEXT:    [[F16MF4X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 4), align 2
// CHECK-NEXT:    [[F16MF4X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 5), align 2
// CHECK-NEXT:    [[F16MF4X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 6), align 2
// CHECK-NEXT:    [[F16MF4X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 7), align 2
// CHECK-NEXT:    [[F16MF4X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 8), align 2
// CHECK-NEXT:    [[F16MF2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 2), align 2
// CHECK-NEXT:    [[F16MF2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 3), align 2
// CHECK-NEXT:    [[F16MF2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 4), align 2
// CHECK-NEXT:    [[F16MF2X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 5), align 2
// CHECK-NEXT:    [[F16MF2X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 6), align 2
// CHECK-NEXT:    [[F16MF2X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 7), align 2
// CHECK-NEXT:    [[F16MF2X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 8), align 2
// CHECK-NEXT:    [[F16M1X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 2), align 2
// CHECK-NEXT:    [[F16M1X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 3), align 2
// CHECK-NEXT:    [[F16M1X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 4), align 2
// CHECK-NEXT:    [[F16M1X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 2
// CHECK-NEXT:    [[F16M1X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 6), align 2
// CHECK-NEXT:    [[F16M1X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 7), align 2
// CHECK-NEXT:    [[F16M1X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 8), align 2
// CHECK-NEXT:    [[F16M2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 2
// CHECK-NEXT:    [[F16M2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 3), align 2
// CHECK-NEXT:    [[F16M2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 4), align 2
// CHECK-NEXT:    [[F16M4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 2
// CHECK-NEXT:    [[F32MF2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 2), align 4
// CHECK-NEXT:    [[F32MF2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 3), align 4
// CHECK-NEXT:    [[F32MF2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 4), align 4
// CHECK-NEXT:    [[F32MF2X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 5), align 4
// CHECK-NEXT:    [[F32MF2X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 6), align 4
// CHECK-NEXT:    [[F32MF2X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 7), align 4
// CHECK-NEXT:    [[F32MF2X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 8), align 4
// CHECK-NEXT:    [[F32M1X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 2), align 4
// CHECK-NEXT:    [[F32M1X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 3), align 4
// CHECK-NEXT:    [[F32M1X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 4), align 4
// CHECK-NEXT:    [[F32M1X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 4
// CHECK-NEXT:    [[F32M1X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 6), align 4
// CHECK-NEXT:    [[F32M1X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 7), align 4
// CHECK-NEXT:    [[F32M1X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 8), align 4
// CHECK-NEXT:    [[F32M2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 4
// CHECK-NEXT:    [[F32M2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 3), align 4
// CHECK-NEXT:    [[F32M2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 4), align 4
// CHECK-NEXT:    [[F32M4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 4
// CHECK-NEXT:    [[F64M1X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 2), align 8
// CHECK-NEXT:    [[F64M1X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 3), align 8
// CHECK-NEXT:    [[F64M1X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 4), align 8
// CHECK-NEXT:    [[F64M1X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 8
// CHECK-NEXT:    [[F64M1X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 6), align 8
// CHECK-NEXT:    [[F64M1X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 7), align 8
// CHECK-NEXT:    [[F64M1X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 8), align 8
// CHECK-NEXT:    [[F64M2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 8
// CHECK-NEXT:    [[F64M2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 3), align 8
// CHECK-NEXT:    [[F64M2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 4), align 8
// CHECK-NEXT:    [[F64M4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 8
// CHECK-NEXT:    [[BF16MF4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 2), align 2
// CHECK-NEXT:    [[BF16MF4X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 3), align 2
// CHECK-NEXT:    [[BF16MF4X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 4), align 2
// CHECK-NEXT:    [[BF16MF4X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 5), align 2
// CHECK-NEXT:    [[BF16MF4X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 6), align 2
// CHECK-NEXT:    [[BF16MF4X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 7), align 2
// CHECK-NEXT:    [[BF16MF4X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 2 x i8>, 8), align 2
// CHECK-NEXT:    [[BF16MF2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 2), align 2
// CHECK-NEXT:    [[BF16MF2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 3), align 2
// CHECK-NEXT:    [[BF16MF2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 4), align 2
// CHECK-NEXT:    [[BF16MF2X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 5), align 2
// CHECK-NEXT:    [[BF16MF2X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 6), align 2
// CHECK-NEXT:    [[BF16MF2X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 7), align 2
// CHECK-NEXT:    [[BF16MF2X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 4 x i8>, 8), align 2
// CHECK-NEXT:    [[BF16M1X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 2), align 2
// CHECK-NEXT:    [[BF16M1X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 3), align 2
// CHECK-NEXT:    [[BF16M1X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 4), align 2
// CHECK-NEXT:    [[BF16M1X5:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 2
// CHECK-NEXT:    [[BF16M1X6:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 6), align 2
// CHECK-NEXT:    [[BF16M1X7:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 7), align 2
// CHECK-NEXT:    [[BF16M1X8:%.*]] = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 8), align 2
// CHECK-NEXT:    [[BF16M2X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 2
// CHECK-NEXT:    [[BF16M2X3:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 3), align 2
// CHECK-NEXT:    [[BF16M2X4:%.*]] = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 4), align 2
// CHECK-NEXT:    [[BF16M4X2:%.*]] = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 2
// CHECK-NEXT:    ret void
//
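// Mask types vboolN_t encode the ratio N = SEW/LMUL, so the coarsest mask
// vbool64_t lowers to <vscale x 1 x i1> while vbool8_t lowers to
// <vscale x 8 x i1>, as the allocas checked above show.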
void foo() {
  vbool64_t b64;
  vbool32_t b32;
  vbool16_t b16;
  vbool8_t b8;

  vint8mf8_t i8mf8;
  vint8mf4_t i8mf4;
  vint8mf2_t i8mf2;
  vint8m1_t i8m1;
  vint8m2_t i8m2;
  vint8m4_t i8m4;
  vint8m8_t i8m8;

  vuint8mf8_t u8mf8;
  vuint8mf4_t u8mf4;
  vuint8mf2_t u8mf2;
  vuint8m1_t u8m1;
  vuint8m2_t u8m2;
  vuint8m4_t u8m4;
  vuint8m8_t u8m8;

  vint16mf4_t i16mf4;
  vint16mf2_t i16mf2;
  vint16m1_t i16m1;
  vint16m2_t i16m2;
  vint16m4_t i16m4;
  vint16m8_t i16m8;

  vuint16mf4_t u16mf4;
  vuint16mf2_t u16mf2;
  vuint16m1_t u16m1;
  vuint16m2_t u16m2;
  vuint16m4_t u16m4;
  vuint16m8_t u16m8;

  vint32mf2_t i32mf2;
  vint32m1_t i32m1;
  vint32m2_t i32m2;
  vint32m4_t i32m4;
  vint32m8_t i32m8;

  vuint32mf2_t u32mf2;
  vuint32m1_t u32m1;
  vuint32m2_t u32m2;
  vuint32m4_t u32m4;
  vuint32m8_t u32m8;

  vint64m1_t i64m1;
  vint64m2_t i64m2;
  vint64m4_t i64m4;
  vint64m8_t i64m8;

  vuint64m1_t u64m1;
  vuint64m2_t u64m2;
  vuint64m4_t u64m4;
  vuint64m8_t u64m8;

  vfloat16mf4_t f16mf4;
  vfloat16mf2_t f16mf2;
  vfloat16m1_t f16m1;
  vfloat16m2_t f16m2;
  vfloat16m4_t f16m4;
  vfloat16m8_t f16m8;

  vbfloat16mf4_t bf16mf4;
  vbfloat16mf2_t bf16mf2;
  vbfloat16m1_t bf16m1;
  vbfloat16m2_t bf16m2;
  vbfloat16m4_t bf16m4;
  vbfloat16m8_t bf16m8;

  vfloat32mf2_t f32mf2;
  vfloat32m1_t f32m1;
  vfloat32m2_t f32m2;
  vfloat32m4_t f32m4;
  vfloat32m8_t f32m8;

  vfloat64m1_t f64m1;
  vfloat64m2_t f64m2;
  vfloat64m4_t f64m4;
  vfloat64m8_t f64m8;

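  // Tuple types v<base>x<NF>_t back the segment load/store intrinsics. They
  // lower to target("riscv.vector.tuple", <vscale x K x i8>, NF), where K is
  // the per-vscale byte size of one field; the element type is erased, which
  // is why the checks above show e.g. vint16m1x2_t and vint32m1x2_t sharing
  // <vscale x 8 x i8>.
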
  // i8
  vint8mf8x2_t i8mf8x2;
  vint8mf8x3_t i8mf8x3;
  vint8mf8x4_t i8mf8x4;
  vint8mf8x5_t i8mf8x5;
  vint8mf8x6_t i8mf8x6;
  vint8mf8x7_t i8mf8x7;
  vint8mf8x8_t i8mf8x8;

  vint8mf4x2_t i8mf4x2;
  vint8mf4x3_t i8mf4x3;
  vint8mf4x4_t i8mf4x4;
  vint8mf4x5_t i8mf4x5;
  vint8mf4x6_t i8mf4x6;
  vint8mf4x7_t i8mf4x7;
  vint8mf4x8_t i8mf4x8;

  vint8mf2x2_t i8mf2x2;
  vint8mf2x3_t i8mf2x3;
  vint8mf2x4_t i8mf2x4;
  vint8mf2x5_t i8mf2x5;
  vint8mf2x6_t i8mf2x6;
  vint8mf2x7_t i8mf2x7;
  vint8mf2x8_t i8mf2x8;

  vint8m1x2_t i8m1x2;
  vint8m1x3_t i8m1x3;
  vint8m1x4_t i8m1x4;
  vint8m1x5_t i8m1x5;
  vint8m1x6_t i8m1x6;
  vint8m1x7_t i8m1x7;
  vint8m1x8_t i8m1x8;

  vint8m2x2_t i8m2x2;
  vint8m2x3_t i8m2x3;
  vint8m2x4_t i8m2x4;

  vint8m4x2_t i8m4x2;
  // u8
  vuint8mf8x2_t u8mf8x2;
  vuint8mf8x3_t u8mf8x3;
  vuint8mf8x4_t u8mf8x4;
  vuint8mf8x5_t u8mf8x5;
  vuint8mf8x6_t u8mf8x6;
  vuint8mf8x7_t u8mf8x7;
  vuint8mf8x8_t u8mf8x8;

  vuint8mf4x2_t u8mf4x2;
  vuint8mf4x3_t u8mf4x3;
  vuint8mf4x4_t u8mf4x4;
  vuint8mf4x5_t u8mf4x5;
  vuint8mf4x6_t u8mf4x6;
  vuint8mf4x7_t u8mf4x7;
  vuint8mf4x8_t u8mf4x8;

  vuint8mf2x2_t u8mf2x2;
  vuint8mf2x3_t u8mf2x3;
  vuint8mf2x4_t u8mf2x4;
  vuint8mf2x5_t u8mf2x5;
  vuint8mf2x6_t u8mf2x6;
  vuint8mf2x7_t u8mf2x7;
  vuint8mf2x8_t u8mf2x8;

  vuint8m1x2_t u8m1x2;
  vuint8m1x3_t u8m1x3;
  vuint8m1x4_t u8m1x4;
  vuint8m1x5_t u8m1x5;
  vuint8m1x6_t u8m1x6;
  vuint8m1x7_t u8m1x7;
  vuint8m1x8_t u8m1x8;

  vuint8m2x2_t u8m2x2;
  vuint8m2x3_t u8m2x3;
  vuint8m2x4_t u8m2x4;

  vuint8m4x2_t u8m4x2;
  // i16
  vint16mf4x2_t i16mf4x2;
  vint16mf4x3_t i16mf4x3;
  vint16mf4x4_t i16mf4x4;
  vint16mf4x5_t i16mf4x5;
  vint16mf4x6_t i16mf4x6;
  vint16mf4x7_t i16mf4x7;
  vint16mf4x8_t i16mf4x8;

  vint16mf2x2_t i16mf2x2;
  vint16mf2x3_t i16mf2x3;
  vint16mf2x4_t i16mf2x4;
  vint16mf2x5_t i16mf2x5;
  vint16mf2x6_t i16mf2x6;
  vint16mf2x7_t i16mf2x7;
  vint16mf2x8_t i16mf2x8;

  vint16m1x2_t i16m1x2;
  vint16m1x3_t i16m1x3;
  vint16m1x4_t i16m1x4;
  vint16m1x5_t i16m1x5;
  vint16m1x6_t i16m1x6;
  vint16m1x7_t i16m1x7;
  vint16m1x8_t i16m1x8;

  vint16m2x2_t i16m2x2;
  vint16m2x3_t i16m2x3;
  vint16m2x4_t i16m2x4;

  vint16m4x2_t i16m4x2;
  // u16
  vuint16mf4x2_t u16mf4x2;
  vuint16mf4x3_t u16mf4x3;
  vuint16mf4x4_t u16mf4x4;
  vuint16mf4x5_t u16mf4x5;
  vuint16mf4x6_t u16mf4x6;
  vuint16mf4x7_t u16mf4x7;
  vuint16mf4x8_t u16mf4x8;

  vuint16mf2x2_t u16mf2x2;
  vuint16mf2x3_t u16mf2x3;
  vuint16mf2x4_t u16mf2x4;
  vuint16mf2x5_t u16mf2x5;
  vuint16mf2x6_t u16mf2x6;
  vuint16mf2x7_t u16mf2x7;
  vuint16mf2x8_t u16mf2x8;

  vuint16m1x2_t u16m1x2;
  vuint16m1x3_t u16m1x3;
  vuint16m1x4_t u16m1x4;
  vuint16m1x5_t u16m1x5;
  vuint16m1x6_t u16m1x6;
  vuint16m1x7_t u16m1x7;
  vuint16m1x8_t u16m1x8;

  vuint16m2x2_t u16m2x2;
  vuint16m2x3_t u16m2x3;
  vuint16m2x4_t u16m2x4;

  vuint16m4x2_t u16m4x2;
  // i32
  vint32mf2x2_t i32mf2x2;
  vint32mf2x3_t i32mf2x3;
  vint32mf2x4_t i32mf2x4;
  vint32mf2x5_t i32mf2x5;
  vint32mf2x6_t i32mf2x6;
  vint32mf2x7_t i32mf2x7;
  vint32mf2x8_t i32mf2x8;

  vint32m1x2_t i32m1x2;
  vint32m1x3_t i32m1x3;
  vint32m1x4_t i32m1x4;
  vint32m1x5_t i32m1x5;
  vint32m1x6_t i32m1x6;
  vint32m1x7_t i32m1x7;
  vint32m1x8_t i32m1x8;

  vint32m2x2_t i32m2x2;
  vint32m2x3_t i32m2x3;
  vint32m2x4_t i32m2x4;

  vint32m4x2_t i32m4x2;
  // u32
  vuint32mf2x2_t u32mf2x2;
  vuint32mf2x3_t u32mf2x3;
  vuint32mf2x4_t u32mf2x4;
  vuint32mf2x5_t u32mf2x5;
  vuint32mf2x6_t u32mf2x6;
  vuint32mf2x7_t u32mf2x7;
  vuint32mf2x8_t u32mf2x8;

  vuint32m1x2_t u32m1x2;
  vuint32m1x3_t u32m1x3;
  vuint32m1x4_t u32m1x4;
  vuint32m1x5_t u32m1x5;
  vuint32m1x6_t u32m1x6;
  vuint32m1x7_t u32m1x7;
  vuint32m1x8_t u32m1x8;

  vuint32m2x2_t u32m2x2;
  vuint32m2x3_t u32m2x3;
  vuint32m2x4_t u32m2x4;

  vuint32m4x2_t u32m4x2;
  // i64
  vint64m1x2_t i64m1x2;
  vint64m1x3_t i64m1x3;
  vint64m1x4_t i64m1x4;
  vint64m1x5_t i64m1x5;
  vint64m1x6_t i64m1x6;
  vint64m1x7_t i64m1x7;
  vint64m1x8_t i64m1x8;

  vint64m2x2_t i64m2x2;
  vint64m2x3_t i64m2x3;
  vint64m2x4_t i64m2x4;

  vint64m4x2_t i64m4x2;
  // u64
  vuint64m1x2_t u64m1x2;
  vuint64m1x3_t u64m1x3;
  vuint64m1x4_t u64m1x4;
  vuint64m1x5_t u64m1x5;
  vuint64m1x6_t u64m1x6;
  vuint64m1x7_t u64m1x7;
  vuint64m1x8_t u64m1x8;

  vuint64m2x2_t u64m2x2;
  vuint64m2x3_t u64m2x3;
  vuint64m2x4_t u64m2x4;

  vuint64m4x2_t u64m4x2;
  // f16
  vfloat16mf4x2_t f16mf4x2;
  vfloat16mf4x3_t f16mf4x3;
  vfloat16mf4x4_t f16mf4x4;
  vfloat16mf4x5_t f16mf4x5;
  vfloat16mf4x6_t f16mf4x6;
  vfloat16mf4x7_t f16mf4x7;
  vfloat16mf4x8_t f16mf4x8;

  vfloat16mf2x2_t f16mf2x2;
  vfloat16mf2x3_t f16mf2x3;
  vfloat16mf2x4_t f16mf2x4;
  vfloat16mf2x5_t f16mf2x5;
  vfloat16mf2x6_t f16mf2x6;
  vfloat16mf2x7_t f16mf2x7;
  vfloat16mf2x8_t f16mf2x8;

  vfloat16m1x2_t f16m1x2;
  vfloat16m1x3_t f16m1x3;
  vfloat16m1x4_t f16m1x4;
  vfloat16m1x5_t f16m1x5;
  vfloat16m1x6_t f16m1x6;
  vfloat16m1x7_t f16m1x7;
  vfloat16m1x8_t f16m1x8;

  vfloat16m2x2_t f16m2x2;
  vfloat16m2x3_t f16m2x3;
  vfloat16m2x4_t f16m2x4;

  vfloat16m4x2_t f16m4x2;
  // f32
  vfloat32mf2x2_t f32mf2x2;
  vfloat32mf2x3_t f32mf2x3;
  vfloat32mf2x4_t f32mf2x4;
  vfloat32mf2x5_t f32mf2x5;
  vfloat32mf2x6_t f32mf2x6;
  vfloat32mf2x7_t f32mf2x7;
  vfloat32mf2x8_t f32mf2x8;

  vfloat32m1x2_t f32m1x2;
  vfloat32m1x3_t f32m1x3;
  vfloat32m1x4_t f32m1x4;
  vfloat32m1x5_t f32m1x5;
  vfloat32m1x6_t f32m1x6;
  vfloat32m1x7_t f32m1x7;
  vfloat32m1x8_t f32m1x8;

  vfloat32m2x2_t f32m2x2;
  vfloat32m2x3_t f32m2x3;
  vfloat32m2x4_t f32m2x4;

  vfloat32m4x2_t f32m4x2;
  // f64
  vfloat64m1x2_t f64m1x2;
  vfloat64m1x3_t f64m1x3;
  vfloat64m1x4_t f64m1x4;
  vfloat64m1x5_t f64m1x5;
  vfloat64m1x6_t f64m1x6;
  vfloat64m1x7_t f64m1x7;
  vfloat64m1x8_t f64m1x8;

  vfloat64m2x2_t f64m2x2;
  vfloat64m2x3_t f64m2x3;
  vfloat64m2x4_t f64m2x4;

  vfloat64m4x2_t f64m4x2;
  // bf16
  vbfloat16mf4x2_t bf16mf4x2;
  vbfloat16mf4x3_t bf16mf4x3;
  vbfloat16mf4x4_t bf16mf4x4;
  vbfloat16mf4x5_t bf16mf4x5;
  vbfloat16mf4x6_t bf16mf4x6;
  vbfloat16mf4x7_t bf16mf4x7;
  vbfloat16mf4x8_t bf16mf4x8;

  vbfloat16mf2x2_t bf16mf2x2;
  vbfloat16mf2x3_t bf16mf2x3;
  vbfloat16mf2x4_t bf16mf2x4;
  vbfloat16mf2x5_t bf16mf2x5;
  vbfloat16mf2x6_t bf16mf2x6;
  vbfloat16mf2x7_t bf16mf2x7;
  vbfloat16mf2x8_t bf16mf2x8;

  vbfloat16m1x2_t bf16m1x2;
  vbfloat16m1x3_t bf16m1x3;
  vbfloat16m1x4_t bf16m1x4;
  vbfloat16m1x5_t bf16m1x5;
  vbfloat16m1x6_t bf16m1x6;
  vbfloat16m1x7_t bf16m1x7;
  vbfloat16m1x8_t bf16m1x8;

  vbfloat16m2x2_t bf16m2x2;
  vbfloat16m2x3_t bf16m2x3;
  vbfloat16m2x4_t bf16m2x4;

  vbfloat16m4x2_t bf16m4x2;
}