# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
---
name: test_nxv1i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8, $v9

    ; RV32I-LABEL: name: test_nxv1i8
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_MF8_:%[0-9]+]]:vr = PseudoVADD_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF8_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: test_nxv1i8
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_MF8_:%[0-9]+]]:vr = PseudoVADD_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF8_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 1 x s8>) = COPY $v8
    %1:vrb(<vscale x 1 x s8>) = COPY $v9
    %2:vrb(<vscale x 1 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8, $v9

    ; RV32I-LABEL: name: test_nxv2i8
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF4_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: test_nxv2i8
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF4_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 2 x s8>) = COPY $v8
    %1:vrb(<vscale x 2 x s8>) = COPY $v9
    %2:vrb(<vscale x 2 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8, $v9

    ; RV32I-LABEL: name: test_nxv4i8
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: test_nxv4i8
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 4 x s8>) = COPY $v8
    %1:vrb(<vscale x 4 x s8>) = COPY $v9
    %2:vrb(<vscale x 4 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 4 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8, $v9

    ; RV32I-LABEL: name: test_nxv8i8
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: test_nxv8i8
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 8 x s8>) = COPY $v8
    %1:vrb(<vscale x 8 x s8>) = COPY $v9
    %2:vrb(<vscale x 8 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 8 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv16i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8m2, $v10m2

    ; RV32I-LABEL: name: test_nxv16i8
    ; RV32I: liveins: $v8m2, $v10m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ;
    ; RV64I-LABEL: name: test_nxv16i8
    ; RV64I: liveins: $v8m2, $v10m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:vrb(<vscale x 16 x s8>) = COPY $v8m2
    %1:vrb(<vscale x 16 x s8>) = COPY $v10m2
    %2:vrb(<vscale x 16 x s8>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 16 x s8>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv32i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8m4, $v12m4

    ; RV32I-LABEL: name: test_nxv32i8
    ; RV32I: liveins: $v8m4, $v12m4
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ;
    ; RV64I-LABEL: name: test_nxv32i8
    ; RV64I: liveins: $v8m4, $v12m4
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:vrb(<vscale x 32 x s8>) = COPY $v8m4
    %1:vrb(<vscale x 32 x s8>) = COPY $v12m4
    %2:vrb(<vscale x 32 x s8>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 32 x s8>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv64i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8m8, $v16m8

    ; RV32I-LABEL: name: test_nxv64i8
    ; RV32I: liveins: $v8m8, $v16m8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ;
    ; RV64I-LABEL: name: test_nxv64i8
    ; RV64I: liveins: $v8m8, $v16m8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:vrb(<vscale x 64 x s8>) = COPY $v8m8
    %1:vrb(<vscale x 64 x s8>) = COPY $v16m8
    %2:vrb(<vscale x 64 x s8>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 64 x s8>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8, $v9

    ; RV32I-LABEL: name: test_nxv1i16
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF4_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: test_nxv1i16
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF4_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 1 x s16>) = COPY $v8
    %1:vrb(<vscale x 1 x s16>) = COPY $v9
    %2:vrb(<vscale x 1 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8, $v9

    ; RV32I-LABEL: name: test_nxv2i16
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: test_nxv2i16
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 2 x s16>) = COPY $v8
    %1:vrb(<vscale x 2 x s16>) = COPY $v9
    %2:vrb(<vscale x 2 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8, $v9

    ; RV32I-LABEL: name: test_nxv4i16
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: test_nxv4i16
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 4 x s16>) = COPY $v8
    %1:vrb(<vscale x 4 x s16>) = COPY $v9
    %2:vrb(<vscale x 4 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 4 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8m2, $v10m2

    ; RV32I-LABEL: name: test_nxv8i16
    ; RV32I: liveins: $v8m2, $v10m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ;
    ; RV64I-LABEL: name: test_nxv8i16
    ; RV64I: liveins: $v8m2, $v10m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:vrb(<vscale x 8 x s16>) = COPY $v8m2
    %1:vrb(<vscale x 8 x s16>) = COPY $v10m2
    %2:vrb(<vscale x 8 x s16>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 8 x s16>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv16i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8m4, $v12m4

    ; RV32I-LABEL: name: test_nxv16i16
    ; RV32I: liveins: $v8m4, $v12m4
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ;
    ; RV64I-LABEL: name: test_nxv16i16
    ; RV64I: liveins: $v8m4, $v12m4
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:vrb(<vscale x 16 x s16>) = COPY $v8m4
    %1:vrb(<vscale x 16 x s16>) = COPY $v12m4
    %2:vrb(<vscale x 16 x s16>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 16 x s16>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv32i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8m8, $v16m8

    ; RV32I-LABEL: name: test_nxv32i16
    ; RV32I: liveins: $v8m8, $v16m8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ;
    ; RV64I-LABEL: name: test_nxv32i16
    ; RV64I: liveins: $v8m8, $v16m8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:vrb(<vscale x 32 x s16>) = COPY $v8m8
    %1:vrb(<vscale x 32 x s16>) = COPY $v16m8
    %2:vrb(<vscale x 32 x s16>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 32 x s16>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8, $v9

    ; RV32I-LABEL: name: test_nxv1i32
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: test_nxv1i32
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 1 x s32>) = COPY $v8
    %1:vrb(<vscale x 1 x s32>) = COPY $v9
    %2:vrb(<vscale x 1 x s32>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8, $v9

    ; RV32I-LABEL: name: test_nxv2i32
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: test_nxv2i32
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 2 x s32>) = COPY $v8
    %1:vrb(<vscale x 2 x s32>) = COPY $v9
    %2:vrb(<vscale x 2 x s32>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8m2, $v10m2

    ; RV32I-LABEL: name: test_nxv4i32
    ; RV32I: liveins: $v8m2, $v10m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ;
    ; RV64I-LABEL: name: test_nxv4i32
    ; RV64I: liveins: $v8m2, $v10m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:vrb(<vscale x 4 x s32>) = COPY $v8m2
    %1:vrb(<vscale x 4 x s32>) = COPY $v10m2
    %2:vrb(<vscale x 4 x s32>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 4 x s32>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv8i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8m4, $v12m4

    ; RV32I-LABEL: name: test_nxv8i32
    ; RV32I: liveins: $v8m4, $v12m4
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ;
    ; RV64I-LABEL: name: test_nxv8i32
    ; RV64I: liveins: $v8m4, $v12m4
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:vrb(<vscale x 8 x s32>) = COPY $v8m4
    %1:vrb(<vscale x 8 x s32>) = COPY $v12m4
    %2:vrb(<vscale x 8 x s32>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 8 x s32>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv16i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8m8, $v16m8

    ; RV32I-LABEL: name: test_nxv16i32
    ; RV32I: liveins: $v8m8, $v16m8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ;
    ; RV64I-LABEL: name: test_nxv16i32
    ; RV64I: liveins: $v8m8, $v16m8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:vrb(<vscale x 16 x s32>) = COPY $v8m8
    %1:vrb(<vscale x 16 x s32>) = COPY $v16m8
    %2:vrb(<vscale x 16 x s32>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 16 x s32>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i64
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8, $v9

    ; RV32I-LABEL: name: test_nxv1i64
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: test_nxv1i64
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 1 x s64>) = COPY $v8
    %1:vrb(<vscale x 1 x s64>) = COPY $v9
    %2:vrb(<vscale x 1 x s64>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s64>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i64
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8m2, $v10m2

    ; RV32I-LABEL: name: test_nxv2i64
    ; RV32I: liveins: $v8m2, $v10m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ;
    ; RV64I-LABEL: name: test_nxv2i64
    ; RV64I: liveins: $v8m2, $v10m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:vrb(<vscale x 2 x s64>) = COPY $v8m2
    %1:vrb(<vscale x 2 x s64>) = COPY $v10m2
    %2:vrb(<vscale x 2 x s64>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 2 x s64>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv4i64
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8m4, $v12m4

    ; RV32I-LABEL: name: test_nxv4i64
    ; RV32I: liveins: $v8m4, $v12m4
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ;
    ; RV64I-LABEL: name: test_nxv4i64
    ; RV64I: liveins: $v8m4, $v12m4
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:vrb(<vscale x 4 x s64>) = COPY $v8m4
    %1:vrb(<vscale x 4 x s64>) = COPY $v12m4
    %2:vrb(<vscale x 4 x s64>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 4 x s64>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv8i64
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8m8, $v16m8

    ; RV32I-LABEL: name: test_nxv8i64
    ; RV32I: liveins: $v8m8, $v16m8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ;
    ; RV64I-LABEL: name: test_nxv8i64
    ; RV64I: liveins: $v8m8, $v16m8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:vrb(<vscale x 8 x s64>) = COPY $v8m8
    %1:vrb(<vscale x 8 x s64>) = COPY $v16m8
    %2:vrb(<vscale x 8 x s64>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 8 x s64>)
    PseudoRET implicit $v8m8

...