; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64-linux-gnu -O0 -mattr=+sve -global-isel -global-isel-abort=1 -aarch64-enable-gisel-sve=1 \
; RUN:   -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s

;; Check that scalable-vector formal arguments are lowered by the GlobalISel
;; IRTranslator: single-register types copy from $z0; wider types split across
;; $z0..$z7 and are rebuilt with G_CONCAT_VECTORS per the AAPCS64 SVE ABI.

;; vscale x 128-bit

define void @formal_argument_nxv16i8(<vscale x 16 x i8> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16i8
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv8i16(<vscale x 8 x i16> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv8i16
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv4i32(<vscale x 4 x i32> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv4i32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv2i64(<vscale x 2 x i64> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv2i64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv4f32(<vscale x 4 x float> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv4f32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv2f64(<vscale x 2 x double> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv2f64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv2p0(<vscale x 2 x ptr> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv2p0
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

;; vscale x 256-bit

define void @formal_argument_nxv32i8(<vscale x 32 x i8> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv32i8
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 32 x s8>) = G_CONCAT_VECTORS [[COPY]](<vscale x 16 x s8>), [[COPY1]](<vscale x 16 x s8>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv16i16(<vscale x 16 x i16> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16i16
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s16>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s16>), [[COPY1]](<vscale x 8 x s16>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv8i32(<vscale x 8 x i32> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv8i32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv4i64(<vscale x 4 x i64> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv4i64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv8f32(<vscale x 8 x float> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv8f32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv4f64(<vscale x 4 x double> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv4f64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv4p0(<vscale x 4 x ptr> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv4p0
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

;; vscale x 512-bit

define void @formal_argument_nxv64i8(<vscale x 64 x i8> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv64i8
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 64 x s8>) = G_CONCAT_VECTORS [[COPY]](<vscale x 16 x s8>), [[COPY1]](<vscale x 16 x s8>), [[COPY2]](<vscale x 16 x s8>), [[COPY3]](<vscale x 16 x s8>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv32i16(<vscale x 32 x i16> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv32i16
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 32 x s16>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s16>), [[COPY1]](<vscale x 8 x s16>), [[COPY2]](<vscale x 8 x s16>), [[COPY3]](<vscale x 8 x s16>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv16i32(<vscale x 16 x i32> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16i32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>), [[COPY2]](<vscale x 4 x s32>), [[COPY3]](<vscale x 4 x s32>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv8i64(<vscale x 8 x i64> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv8i64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv16f32(<vscale x 16 x float> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16f32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>), [[COPY2]](<vscale x 4 x s32>), [[COPY3]](<vscale x 4 x s32>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv8f64(<vscale x 8 x double> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv8f64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv8p0(<vscale x 8 x ptr> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv8p0
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

;; vscale x 1024-bit

define void @formal_argument_nxv128i8(<vscale x 128 x i8> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv128i8
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 128 x s8>) = G_CONCAT_VECTORS [[COPY]](<vscale x 16 x s8>), [[COPY1]](<vscale x 16 x s8>), [[COPY2]](<vscale x 16 x s8>), [[COPY3]](<vscale x 16 x s8>), [[COPY4]](<vscale x 16 x s8>), [[COPY5]](<vscale x 16 x s8>), [[COPY6]](<vscale x 16 x s8>), [[COPY7]](<vscale x 16 x s8>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv64i16(<vscale x 64 x i16> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv64i16
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 64 x s16>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s16>), [[COPY1]](<vscale x 8 x s16>), [[COPY2]](<vscale x 8 x s16>), [[COPY3]](<vscale x 8 x s16>), [[COPY4]](<vscale x 8 x s16>), [[COPY5]](<vscale x 8 x s16>), [[COPY6]](<vscale x 8 x s16>), [[COPY7]](<vscale x 8 x s16>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv32i32(<vscale x 32 x i32> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv32i32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 32 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>), [[COPY2]](<vscale x 4 x s32>), [[COPY3]](<vscale x 4 x s32>), [[COPY4]](<vscale x 4 x s32>), [[COPY5]](<vscale x 4 x s32>), [[COPY6]](<vscale x 4 x s32>), [[COPY7]](<vscale x 4 x s32>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv16i64(<vscale x 16 x i64> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16i64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>), [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv32f32(<vscale x 32 x float> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv32f32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 32 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>), [[COPY2]](<vscale x 4 x s32>), [[COPY3]](<vscale x 4 x s32>), [[COPY4]](<vscale x 4 x s32>), [[COPY5]](<vscale x 4 x s32>), [[COPY6]](<vscale x 4 x s32>), [[COPY7]](<vscale x 4 x s32>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv16f64(<vscale x 16 x double> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16f64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>), [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv16p0(<vscale x 16 x ptr> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16p0
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>), [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}