# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s

---
# G_SELECT on <vscale x 2 x i8> operands with a <vscale x 2 x i1> condition:
# expected to select to PseudoVMERGE_VVM_MF4 (LMUL=1/4, SEW operand 3 = e8).
# Renamed from select_nxv1i8: the body tests nxv2 types, not nxv1.
name:            select_nxv2i8
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32I-LABEL: name: select_nxv2i8
    ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
    ; RV32I-NEXT: $v0 = COPY [[DEF]]
    ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: select_nxv2i8
    ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
    ; RV64I-NEXT: $v0 = COPY [[DEF]]
    ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
    %1:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
    %2:vrb(<vscale x 2 x s8>) = G_SELECT %0(<vscale x 2 x s1>), %1, %1
    $v8 = COPY %2(<vscale x 2 x s8>)
    PseudoRET implicit $v8

...
---
# G_SELECT on <vscale x 8 x i8> operands with a <vscale x 8 x i1> condition:
# expected to select to PseudoVMERGE_VVM_M1 (LMUL=1, SEW operand 3 = e8).
# Renamed from select_nxv4i8: the body tests nxv8 types, not nxv4.
name:            select_nxv8i8
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32I-LABEL: name: select_nxv8i8
    ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
    ; RV32I-NEXT: $v0 = COPY [[DEF]]
    ; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: select_nxv8i8
    ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
    ; RV64I-NEXT: $v0 = COPY [[DEF]]
    ; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
    %1:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
    %2:vrb(<vscale x 8 x s8>) = G_SELECT %0(<vscale x 8 x s1>), %1, %1
    $v8 = COPY %2(<vscale x 8 x s8>)
    PseudoRET implicit $v8

...
---
# G_SELECT on <vscale x 32 x i8> operands with a <vscale x 32 x i1> condition:
# expected to select to PseudoVMERGE_VVM_M4 (LMUL=4, SEW operand 3 = e8),
# with operands/result in the vrm4 register classes and the result in $v8m4.
# Renamed from select_nxv16i8: the body tests nxv32 types, not nxv16.
name:            select_nxv32i8
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32I-LABEL: name: select_nxv32i8
    ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
    ; RV32I-NEXT: $v0 = COPY [[DEF]]
    ; RV32I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ;
    ; RV64I-LABEL: name: select_nxv32i8
    ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
    ; RV64I-NEXT: $v0 = COPY [[DEF]]
    ; RV64I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
    %1:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
    %2:vrb(<vscale x 32 x s8>) = G_SELECT %0(<vscale x 32 x s1>), %1, %1
    $v8m4 = COPY %2(<vscale x 32 x s8>)
    PseudoRET implicit $v8m4

...
---
# G_SELECT on <vscale x 1 x i16> operands with a <vscale x 1 x i1> condition:
# expected to select to PseudoVMERGE_VVM_MF4 (LMUL=1/4, SEW operand 4 = e16).
# Renamed from select_nxv64i8: the body tests nxv1 x s16, not nxv64 x s8
# (the checks already match e16/MF4, so the old name was wrong).
name:            select_nxv1i16
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32I-LABEL: name: select_nxv1i16
    ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
    ; RV32I-NEXT: $v0 = COPY [[DEF]]
    ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: select_nxv1i16
    ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
    ; RV64I-NEXT: $v0 = COPY [[DEF]]
    ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
    %1:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
    %2:vrb(<vscale x 1 x s16>) = G_SELECT %0(<vscale x 1 x s1>), %1, %1
    $v8 = COPY %2(<vscale x 1 x s16>)
    PseudoRET implicit $v8

...
---
# G_SELECT on <vscale x 4 x i16> operands with a <vscale x 4 x i1> condition:
# expected to select to PseudoVMERGE_VVM_M1 (LMUL=1, SEW operand 4 = e16).
# Renamed from select_nxv2i16: the body tests nxv4 types, not nxv2.
name:            select_nxv4i16
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32I-LABEL: name: select_nxv4i16
    ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
    ; RV32I-NEXT: $v0 = COPY [[DEF]]
    ; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: select_nxv4i16
    ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
    ; RV64I-NEXT: $v0 = COPY [[DEF]]
    ; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
    %1:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
    %2:vrb(<vscale x 4 x s16>) = G_SELECT %0(<vscale x 4 x s1>), %1, %1
    $v8 = COPY %2(<vscale x 4 x s16>)
    PseudoRET implicit $v8

...
---
# G_SELECT on <vscale x 16 x i16> operands with a <vscale x 16 x i1> condition:
# expected to select to PseudoVMERGE_VVM_M4 (LMUL=4, SEW operand 4 = e16),
# with operands/result in the vrm4 register classes and the result in $v8m4.
# Renamed from select_nxv8i16: the body tests nxv16 types, not nxv8.
name:            select_nxv16i16
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32I-LABEL: name: select_nxv16i16
    ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
    ; RV32I-NEXT: $v0 = COPY [[DEF]]
    ; RV32I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ;
    ; RV64I-LABEL: name: select_nxv16i16
    ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
    ; RV64I-NEXT: $v0 = COPY [[DEF]]
    ; RV64I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
    %1:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
    %2:vrb(<vscale x 16 x s16>) = G_SELECT %0(<vscale x 16 x s1>), %1, %1
    $v8m4 = COPY %2(<vscale x 16 x s16>)
    PseudoRET implicit $v8m4

...
---
# G_SELECT on <vscale x 1 x i32> operands with a <vscale x 1 x i1> condition:
# expected to select to PseudoVMERGE_VVM_MF2 (LMUL=1/2, SEW operand 5 = e32).
# Renamed from select_nxv32i16: the body tests nxv1 x s32, not nxv32 x s16
# (the checks already match e32/MF2, so the old name was wrong).
name:            select_nxv1i32
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32I-LABEL: name: select_nxv1i32
    ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
    ; RV32I-NEXT: $v0 = COPY [[DEF]]
    ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
    ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: select_nxv1i32
    ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
    ; RV64I-NEXT: $v0 = COPY [[DEF]]
    ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
    ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
    %1:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
    %2:vrb(<vscale x 1 x s32>) = G_SELECT %0(<vscale x 1 x s1>), %1, %1
    $v8 = COPY %2(<vscale x 1 x s32>)
    PseudoRET implicit $v8

...
---
# G_SELECT on <vscale x 4 x i32> operands with a <vscale x 4 x i1> condition:
# expected to select to PseudoVMERGE_VVM_M2 (LMUL=2, SEW operand 5 = e32),
# with operands/result in the vrm2 register classes and the result in $v8m2.
# Renamed from select_nxv2i32: the body tests nxv4 types, not nxv2.
name:            select_nxv4i32
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32I-LABEL: name: select_nxv4i32
    ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
    ; RV32I-NEXT: $v0 = COPY [[DEF]]
    ; RV32I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ;
    ; RV64I-LABEL: name: select_nxv4i32
    ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
    ; RV64I-NEXT: $v0 = COPY [[DEF]]
    ; RV64I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
    %1:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
    %2:vrb(<vscale x 4 x s32>) = G_SELECT %0(<vscale x 4 x s1>), %1, %1
    $v8m2 = COPY %2(<vscale x 4 x s32>)
    PseudoRET implicit $v8m2

...
---
# G_SELECT on <vscale x 16 x i32> operands with a <vscale x 16 x i1> condition:
# expected to select to PseudoVMERGE_VVM_M8 (LMUL=8, SEW operand 5 = e32),
# with operands/result in the vrm8 register classes and the result in $v8m8.
# Renamed from select_nxv8i32: the body tests nxv16 types, not nxv8.
name:            select_nxv16i32
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32I-LABEL: name: select_nxv16i32
    ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
    ; RV32I-NEXT: $v0 = COPY [[DEF]]
    ; RV32I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ;
    ; RV64I-LABEL: name: select_nxv16i32
    ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
    ; RV64I-NEXT: $v0 = COPY [[DEF]]
    ; RV64I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
    %1:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
    %2:vrb(<vscale x 16 x s32>) = G_SELECT %0(<vscale x 16 x s1>), %1, %1
    $v8m8 = COPY %2(<vscale x 16 x s32>)
    PseudoRET implicit $v8m8

...
---
# G_SELECT on <vscale x 2 x i64> operands with a <vscale x 2 x i1> condition:
# expected to select to PseudoVMERGE_VVM_M2 (LMUL=2, SEW operand 6 = e64),
# with operands/result in the vrm2 register classes and the result in $v8m2.
# Renamed from select_nxv1i64: the body tests nxv2 types, not nxv1.
name:            select_nxv2i64
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32I-LABEL: name: select_nxv2i64
    ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
    ; RV32I-NEXT: $v0 = COPY [[DEF]]
    ; RV32I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ;
    ; RV64I-LABEL: name: select_nxv2i64
    ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
    ; RV64I-NEXT: $v0 = COPY [[DEF]]
    ; RV64I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
    %1:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
    %2:vrb(<vscale x 2 x s64>) = G_SELECT %0(<vscale x 2 x s1>), %1, %1
    $v8m2 = COPY %2(<vscale x 2 x s64>)
    PseudoRET implicit $v8m2

...
---
# G_SELECT on <vscale x 8 x i64> operands with a <vscale x 8 x i1> condition:
# expected to select to PseudoVMERGE_VVM_M8 (LMUL=8, SEW operand 6 = e64),
# with operands/result in the vrm8 register classes and the result in $v8m8.
# Renamed from select_nxv4i64: the body tests nxv8 types, not nxv4.
name:            select_nxv8i64
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32I-LABEL: name: select_nxv8i64
    ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
    ; RV32I-NEXT: $v0 = COPY [[DEF]]
    ; RV32I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ;
    ; RV64I-LABEL: name: select_nxv8i64
    ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
    ; RV64I-NEXT: $v0 = COPY [[DEF]]
    ; RV64I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
    %1:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
    %2:vrb(<vscale x 8 x s64>) = G_SELECT %0(<vscale x 8 x s1>), %1, %1
    $v8m8 = COPY %2(<vscale x 8 x s64>)
    PseudoRET implicit $v8m8

...