# xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir (revision 0c94915d34e6934c04140bb908364e54d1bc8ada)
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -o - -mtriple=riscv64 -mattr=v -verify-machineinstrs \
# RUN:     -run-pass=phi-node-elimination,register-coalescer,riscv-insert-vsetvli | FileCheck %s

--- |
  ; Embedded IR module for the MIR bodies below.  Functions exercised purely
  ; at the MIR level are declared here with empty (ret void) bodies.
  source_filename = "vsetvli-insert.ll"
  target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
  target triple = "riscv64"

  define <vscale x 1 x i64> @load_add_or_sub(i8 zeroext %cond, ptr %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, ptr %0, i64 %2)
    %tobool = icmp eq i8 %cond, 0
    br i1 %tobool, label %if.else, label %if.then

  if.then:                                          ; preds = %entry
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
    br label %if.end

  if.else:                                          ; preds = %entry
    %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
    br label %if.end

  if.end:                                           ; preds = %if.else, %if.then
    %d = phi <vscale x 1 x i64> [ %b, %if.then ], [ %c, %if.else ]
    ret <vscale x 1 x i64> %d
  }

  define void @load_zext_or_sext(i8 zeroext %cond, ptr %0, ptr %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr %0, i64 %2)
    %tobool = icmp eq i8 %cond, 0
    br i1 %tobool, label %if.else, label %if.then

  if.then:                                          ; preds = %entry
    %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %2)
    br label %if.end

  if.else:                                          ; preds = %entry
    %c = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %2)
    br label %if.end

  if.end:                                           ; preds = %if.else, %if.then
    %d = phi <vscale x 1 x i64> [ %b, %if.then ], [ %c, %if.else ]
    call void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64> %d, ptr %1, i64 %2)
    ret void
  }

  declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>) #1

  define i64 @vmv_x_s(i8 zeroext %cond, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %tobool = icmp eq i8 %cond, 0
    br i1 %tobool, label %if.else, label %if.then

  if.then:                                          ; preds = %entry
    %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
    br label %if.end

  if.else:                                          ; preds = %entry
    %b = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %1, i64 %2)
    br label %if.end

  if.end:                                           ; preds = %if.else, %if.then
    %c = phi <vscale x 1 x i64> [ %a, %if.then ], [ %b, %if.else ]
    %d = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> %c)
    ret i64 %d
  }

  declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #2

  define <vscale x 1 x i64> @vsetvli_add_or_sub(i8 zeroext %cond, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %avl) #0 {
  entry:
    %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0)
    %tobool = icmp eq i8 %cond, 0
    br i1 %tobool, label %if.else, label %if.then

  if.then:                                          ; preds = %entry
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
    br label %if.end

  if.else:                                          ; preds = %entry
    %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
    br label %if.end

  if.end:                                           ; preds = %if.else, %if.then
    %d = phi <vscale x 1 x i64> [ %b, %if.then ], [ %c, %if.else ]
    ret <vscale x 1 x i64> %d
  }

  define void @vsetvli_vcpop() {
    ret void
  }

  define void @vsetvli_loop_store() {
    ret void
  }

  define void @vsetvli_loop_store2() {
    ret void
  }

  define void @redusum_loop(ptr nocapture noundef readonly %a, i32 noundef signext %n, ptr nocapture noundef writeonly %res) #0 {
  entry:
    br label %vector.body

  vector.body:                                      ; preds = %vector.body, %entry
    %lsr.iv1 = phi ptr [ %scevgep, %vector.body ], [ %a, %entry ]
    %lsr.iv = phi i64 [ %lsr.iv.next, %vector.body ], [ 2048, %entry ]
    %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %0, %vector.body ]
    %lsr.iv12 = bitcast ptr %lsr.iv1 to ptr
    %wide.load = load <4 x i32>, ptr %lsr.iv12, align 4
    %0 = add <4 x i32> %wide.load, %vec.phi
    %lsr.iv.next = add nsw i64 %lsr.iv, -4
    %scevgep = getelementptr i32, ptr %lsr.iv1, i64 4
    %1 = icmp eq i64 %lsr.iv.next, 0
    br i1 %1, label %middle.block, label %vector.body

  middle.block:                                     ; preds = %vector.body
    %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %0)
    store i32 %2, ptr %res, align 4
    ret void
  }

  define void @vsetvli_vluxei64_regression() {
    ret void
  }

  define void @if_in_loop() {
    ret void
  }

  define void @pre_undemanded_vl() {
    ret void
  }

  define void @clobberred_forwarded_avl() {
    ret void
  }

  define void @clobberred_forwarded_phi_avl() {
    ret void
  }

  declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)

  declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1

  declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1

  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, ptr nocapture, i64) #3

  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, ptr nocapture, i64) #3

  declare void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64>, ptr nocapture, i64) #4

  declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1

  declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1

  attributes #0 = { "target-features"="+v" }
  attributes #1 = { nounwind readnone }
  attributes #2 = { nounwind }
  attributes #3 = { nounwind readonly }
  attributes #4 = { nounwind writeonly }

...
---
# load_add_or_sub: the vsetvli inserted for the vle64 in the entry block
# (e64, m1, ta, ma) also covers the vadd/vsub in both branches, so no
# additional vsetvli is expected in bb.1 or bb.2.
name:            load_add_or_sub
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: vr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: gpr }
  - { id: 5, class: gpr }
  - { id: 6, class: vr }
  - { id: 7, class: gprnox0 }
  - { id: 8, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%4' }
  - { reg: '$x11', virtual-reg: '%5' }
  - { reg: '$v8', virtual-reg: '%6' }
  - { reg: '$x12', virtual-reg: '%7' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: load_add_or_sub
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $x10, $x11, $v8, $x12
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v8
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x11
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   BEQ [[COPY3]], $x0, %bb.2
  ; CHECK-NEXT:   PseudoBR %bb.1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.if.then:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoBR %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.if.else:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.if.end:
  ; CHECK-NEXT:   $v8 = COPY [[PseudoVADD_VV_M1_]]
  ; CHECK-NEXT:   PseudoRET implicit $v8
  bb.0.entry:
    successors: %bb.2(0x30000000), %bb.1(0x50000000)
    liveins: $x10, $x11, $v8, $x12

    %7:gprnox0 = COPY $x12
    %6:vr = COPY $v8
    %5:gpr = COPY $x11
    %4:gpr = COPY $x10
    %0:vr = PseudoVLE64_V_M1 undef $noreg, %5, %7, 6, 0
    %8:gpr = COPY $x0
    BEQ %4, %8, %bb.2
    PseudoBR %bb.1

  bb.1.if.then:
    %1:vr = PseudoVADD_VV_M1 undef $noreg, %0, %6, %7, 6, 0
    PseudoBR %bb.3

  bb.2.if.else:
    %2:vr = PseudoVSUB_VV_M1 undef $noreg, %0, %6, %7, 6, 0

  bb.3.if.end:
    %3:vr = PHI %1, %bb.1, %2, %bb.2
    $v8 = COPY %3
    PseudoRET implicit $v8

...
---
# load_zext_or_sext: the load runs at e32, mf2; each branch widens to
# e64, m1, so a VL-preserving PseudoVSETVLIX0 is expected in both
# if.then and if.else before the vzext/vsext.
name:            load_zext_or_sext
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: vr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: gpr }
  - { id: 5, class: gpr }
  - { id: 6, class: gpr }
  - { id: 7, class: gprnox0 }
  - { id: 8, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%4' }
  - { reg: '$x11', virtual-reg: '%5' }
  - { reg: '$x12', virtual-reg: '%6' }
  - { reg: '$x13', virtual-reg: '%7' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: load_zext_or_sext
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $x10, $x11, $x12, $x13
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x13
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x12
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x11
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 undef $noreg, [[COPY2]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   BEQ [[COPY3]], $x0, %bb.2
  ; CHECK-NEXT:   PseudoBR %bb.1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.if.then:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   early-clobber %9:vr = PseudoVZEXT_VF2_M1 undef $noreg, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoBR %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.if.else:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   early-clobber %9:vr = PseudoVSEXT_VF2_M1 undef $noreg, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.if.end:
  ; CHECK-NEXT:   PseudoVSE64_V_M1 %9, [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoRET
  bb.0.entry:
    successors: %bb.2(0x30000000), %bb.1(0x50000000)
    liveins: $x10, $x11, $x12, $x13

    %7:gprnox0 = COPY $x13
    %6:gpr = COPY $x12
    %5:gpr = COPY $x11
    %4:gpr = COPY $x10
    %0:vr = PseudoVLE32_V_MF2 undef $noreg, %5, %7, 5, 0
    %8:gpr = COPY $x0
    BEQ %4, %8, %bb.2
    PseudoBR %bb.1

  bb.1.if.then:
    early-clobber %1:vr = PseudoVZEXT_VF2_M1 undef $noreg, %0, %7, 6, 0
    PseudoBR %bb.3

  bb.2.if.else:
    early-clobber %2:vr = PseudoVSEXT_VF2_M1 undef $noreg, %0, %7, 6, 0

  bb.3.if.end:
    %3:vr = PHI %1, %bb.1, %2, %bb.2
    PseudoVSE64_V_M1 %3, %6, %7, 6
    PseudoRET

...
---
# vmv_x_s: no vector op in the entry block, so each branch gets its own
# vsetvli; the vmv.x.s in if.end only needs vtype and relies on the state
# established on both incoming paths.
name:            vmv_x_s
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: vr }
  - { id: 2, class: vr }
  - { id: 3, class: gpr }
  - { id: 4, class: vr }
  - { id: 5, class: vr }
  - { id: 6, class: gprnox0 }
  - { id: 7, class: gpr }
  - { id: 8, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%3' }
  - { reg: '$v8', virtual-reg: '%4' }
  - { reg: '$v9', virtual-reg: '%5' }
  - { reg: '$x11', virtual-reg: '%6' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: vmv_x_s
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $x10, $v8, $v9, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v9
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   BEQ [[COPY3]], $x0, %bb.2
  ; CHECK-NEXT:   PseudoBR %bb.1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.if.then:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoBR %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.if.else:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 undef $noreg, [[COPY1]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.if.end:
  ; CHECK-NEXT:   [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S [[PseudoVADD_VV_M1_]], 6 /* e64 */, implicit $vtype
  ; CHECK-NEXT:   $x10 = COPY [[PseudoVMV_X_S]]
  ; CHECK-NEXT:   PseudoRET implicit $x10
  bb.0.entry:
    successors: %bb.2(0x30000000), %bb.1(0x50000000)
    liveins: $x10, $v8, $v9, $x11

    %6:gprnox0 = COPY $x11
    %5:vr = COPY $v9
    %4:vr = COPY $v8
    %3:gpr = COPY $x10
    %7:gpr = COPY $x0
    BEQ %3, %7, %bb.2
    PseudoBR %bb.1

  bb.1.if.then:
    %0:vr = PseudoVADD_VV_M1 undef $noreg, %4, %5, %6, 6, 0
    PseudoBR %bb.3

  bb.2.if.else:
    %1:vr = PseudoVSUB_VV_M1 undef $noreg, %5, %5, %6, 6, 0

  bb.3.if.end:
    %2:vr = PHI %0, %bb.1, %1, %bb.2
    %8:gpr = PseudoVMV_X_S %2, 6
    $x10 = COPY %8
    PseudoRET implicit $x10

...
---
# vsetvli_add_or_sub: the input already carries an explicit PseudoVSETVLI in
# the entry block whose result is used as the AVL of both branches, so the
# pass should not insert any further vsetvli in bb.1/bb.2.
name:            vsetvli_add_or_sub
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gprnox0 }
  - { id: 1, class: vr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: gpr }
  - { id: 5, class: vr }
  - { id: 6, class: vr }
  - { id: 7, class: gprnox0 }
  - { id: 8, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%4' }
  - { reg: '$v8', virtual-reg: '%5' }
  - { reg: '$v9', virtual-reg: '%6' }
  - { reg: '$x11', virtual-reg: '%7' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: vsetvli_add_or_sub
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $x10, $v8, $v9, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v9
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   dead [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   BEQ [[COPY3]], $x0, %bb.2
  ; CHECK-NEXT:   PseudoBR %bb.1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.if.then:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoBR %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.if.else:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.if.end:
  ; CHECK-NEXT:   $v8 = COPY [[PseudoVADD_VV_M1_]]
  ; CHECK-NEXT:   PseudoRET implicit $v8
  bb.0.entry:
    successors: %bb.2(0x30000000), %bb.1(0x50000000)
    liveins: $x10, $v8, $v9, $x11

    %7:gprnox0 = COPY $x11
    %6:vr = COPY $v9
    %5:vr = COPY $v8
    %4:gpr = COPY $x10
    %0:gprnox0 = PseudoVSETVLI %7, 88, implicit-def dead $vl, implicit-def dead $vtype
    %8:gpr = COPY $x0
    BEQ %4, %8, %bb.2
    PseudoBR %bb.1

  bb.1.if.then:
    %1:vr = PseudoVADD_VV_M1 undef $noreg, %5, %6, %0, 6, 0
    PseudoBR %bb.3

  bb.2.if.else:
    %2:vr = PseudoVSUB_VV_M1 undef $noreg, %5, %6, %0, 6, 0

  bb.3.if.end:
    %3:vr = PHI %1, %bb.1, %2, %bb.2
    $v8 = COPY %3
    PseudoRET implicit $v8

...
---
# vsetvli_vcpop: mixed-SEW code (vid at e64, masked vle32, vcpop) spread
# across blocks; checks where vsetvli/vsetvlix0 toggles are placed and that
# VL is preserved when only SEW/LMUL change between operations.
name:            vsetvli_vcpop
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: gpr, preferred-register: '' }
  - { id: 3, class: vr, preferred-register: '' }
  - { id: 4, class: vrnov0, preferred-register: '' }
  - { id: 5, class: vmv0, preferred-register: '' }
  - { id: 6, class: vrnov0, preferred-register: '' }
  - { id: 7, class: gpr, preferred-register: '' }
  - { id: 8, class: gpr, preferred-register: '' }
  - { id: 9, class: gpr, preferred-register: '' }
  - { id: 10, class: gpr, preferred-register: '' }
  - { id: 11, class: vr, preferred-register: '' }
body:             |
  ; CHECK-LABEL: name: vsetvli_vcpop
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 223 /* e64, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVID_V_MF2_:%[0-9]+]]:vr = PseudoVID_V_MF2 undef $noreg, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_1:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 undef $noreg, 0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVMSEQ_VI_MF2_:%[0-9]+]]:vmv0 = PseudoVMSEQ_VI_MF2 [[PseudoVID_V_MF2_]], 0, -1, 5 /* e32 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   $v0 = COPY [[PseudoVMSEQ_VI_MF2_]]
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 23 /* e32, mf2, tu, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], [[COPY]], $v0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   [[PseudoVCPOP_M_B64_:%[0-9]+]]:gpr = PseudoVCPOP_M_B64 [[PseudoVMSEQ_VI_MF2_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
  ; CHECK-NEXT:   BEQ [[PseudoVCPOP_M_B64_]], $x0, %bb.3
  ; CHECK-NEXT:   PseudoBR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:gpr = LWU [[COPY1]], 0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3:
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   [[PseudoVADD_VX_MF2_:%[0-9]+]]:vr = nsw PseudoVADD_VX_MF2 undef $noreg, [[PseudoVLE32_V_MF2_MASK]], [[DEF]], -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   $v0 = COPY [[PseudoVADD_VX_MF2_]]
  ; CHECK-NEXT:   PseudoRET implicit $v0
  bb.0:
    successors: %bb.1(0x80000000)
    liveins: $x10, $x11

    %0:gpr = COPY $x11
    %1:gpr = COPY $x10
    %2:gpr = IMPLICIT_DEF
    %3:vr = PseudoVID_V_MF2 undef $noreg, -1, 6, 0
    %4:vrnov0 = PseudoVMV_V_I_MF2 undef $noreg, 0, -1, 5, 0

  bb.1:
    successors: %bb.2(0x40000000), %bb.3(0x40000000)

    %5:vmv0 = PseudoVMSEQ_VI_MF2 killed %3, 0, -1, 5
    $v0 = COPY %5
    %6:vrnov0 = PseudoVLE32_V_MF2_MASK %4, killed %0, $v0, -1, 5, 0
    %7:gpr = PseudoVCPOP_M_B64 %5, -1, 0
    %8:gpr = COPY $x0
    BEQ killed %7, %8, %bb.3
    PseudoBR %bb.2

  bb.2:
    successors: %bb.3(0x80000000)

    %9:gpr = LWU %1, 0

  bb.3:
    %10:gpr = PHI %2, %bb.1, %9, %bb.2
    %11:vr = nsw PseudoVADD_VX_MF2 undef $noreg, %6, %10, -1, 5, 0
    $v0 = COPY %11
    PseudoRET implicit $v0
...
---
# vsetvli_loop_store: a single vsetvli before the loop serves the vadd/vse32
# in the loop body; the CHECK lines verify no vsetvli is re-inserted inside
# bb.1.
name:            vsetvli_loop_store
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: gpr, preferred-register: '' }
  - { id: 3, class: gpr, preferred-register: '' }
  - { id: 4, class: vr,  preferred-register: '' }
  - { id: 5, class: gpr, preferred-register: '' }
  - { id: 6, class: gpr, preferred-register: '' }
  - { id: 7, class: vr,  preferred-register: '' }
  - { id: 8, class: gpr, preferred-register: '' }
  - { id: 9, class: gpr, preferred-register: '' }
  - { id: 10, class: gpr, preferred-register: '' }
body:             |
  ; CHECK-LABEL: name: vsetvli_loop_store
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
  ; CHECK-NEXT:   [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 undef $noreg, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 undef $noreg, [[PseudoVID_V_M1_]], [[COPY2]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[MUL:%[0-9]+]]:gpr = MUL [[COPY2]], [[SRLI]]
  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]]
  ; CHECK-NEXT:   PseudoVSE32_V_MF2 [[PseudoVADD_VX_M1_]], [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = ADDI [[COPY2]], 1
  ; CHECK-NEXT:   BLTU [[COPY2]], [[COPY1]], %bb.1
  ; CHECK-NEXT:   PseudoBR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   PseudoRET
  bb.0:
    liveins: $x10, $x11
    %0:gpr = COPY $x10
    %1:gpr = PseudoReadVLENB
    %2:gpr = SRLI %1:gpr, 3
    %3:gpr = COPY $x11
    %4:vr = PseudoVID_V_M1 undef $noreg, -1, 6, 0
    %5:gpr = COPY $x0

  bb.1:
    successors: %bb.1, %bb.2

    %6:gpr = PHI %5:gpr, %bb.0, %10:gpr, %bb.1
    %7:vr = PseudoVADD_VX_M1 undef $noreg, %4:vr, %6:gpr, -1, 6, 0
    %8:gpr = MUL %6:gpr, %2:gpr
    %9:gpr = ADD %0:gpr, %8:gpr
    PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1, 5
    %10:gpr = ADDI %6:gpr, 1
    BLTU %10:gpr, %3:gpr, %bb.1
    PseudoBR %bb.2

  bb.2:

    PseudoRET
...
---
# vsetvli_loop_store2: same shape as vsetvli_loop_store but with the loop
# latch split into its own block (bb.1 -> bb.3 -> back to bb.1); the single
# vsetvli in bb.0 must still cover the whole loop.
name:            vsetvli_loop_store2
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: gpr, preferred-register: '' }
  - { id: 3, class: gpr, preferred-register: '' }
  - { id: 4, class: vr,  preferred-register: '' }
  - { id: 5, class: gpr, preferred-register: '' }
  - { id: 6, class: gpr, preferred-register: '' }
  - { id: 7, class: vr,  preferred-register: '' }
  - { id: 8, class: gpr, preferred-register: '' }
  - { id: 9, class: gpr, preferred-register: '' }
  - { id: 10, class: gpr, preferred-register: '' }
body:             |
  ; CHECK-LABEL: name: vsetvli_loop_store2
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
  ; CHECK-NEXT:   [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 undef $noreg, -1, 6 /* e64 */, 3 /* ta, ma */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 undef $noreg, [[PseudoVID_V_M1_]], [[COPY2]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[MUL:%[0-9]+]]:gpr = MUL [[COPY2]], [[SRLI]]
  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]]
  ; CHECK-NEXT:   PseudoVSE32_V_MF2 [[PseudoVADD_VX_M1_]], [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = ADDI [[COPY2]], 1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.3(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   BLTU [[COPY2]], [[COPY1]], %bb.1
  ; CHECK-NEXT:   PseudoBR %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3:
  ; CHECK-NEXT:   PseudoRET
  bb.0:
    liveins: $x10, $x11
    %0:gpr = COPY $x10
    %1:gpr = PseudoReadVLENB
    %2:gpr = SRLI %1:gpr, 3
    %3:gpr = COPY $x11
    %4:vr = PseudoVID_V_M1 undef $noreg, -1, 6, 3
    %5:gpr = COPY $x0

  bb.1:
    successors: %bb.3

    %6:gpr = PHI %5:gpr, %bb.0, %10:gpr, %bb.3
    %7:vr = PseudoVADD_VX_M1 undef $noreg, %4:vr, %6:gpr, -1, 6, 0
    %8:gpr = MUL %6:gpr, %2:gpr
    %9:gpr = ADD %0:gpr, %8:gpr
    PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1, 5
    %10:gpr = ADDI %6:gpr, 1

  bb.3:
    successors: %bb.1, %bb.2
    BLTU %10:gpr, %3:gpr, %bb.1
    PseudoBR %bb.2

  bb.2:

    PseudoRET
...
---
# redusum_loop: fixed-length <4 x i32> reduction loop.  A VSETIVLI 4 in the
# entry block serves the whole loop body (no vsetvli inside bb.1); a second
# VSETIVLI 1 is inserted only for the final scalar store of the reduction.
name:            redusum_loop
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: gpr }
  - { id: 5, class: gpr }
  - { id: 6, class: gpr }
  - { id: 7, class: gpr }
  - { id: 8, class: gpr }
  - { id: 9, class: gpr }
  - { id: 10, class: vr }
  - { id: 11, class: vr }
  - { id: 12, class: vr }
  - { id: 13, class: gpr }
  - { id: 14, class: vr }
  - { id: 15, class: vr }
  - { id: 16, class: vr }
  - { id: 17, class: vr }
  - { id: 18, class: gpr }
  - { id: 19, class: gpr }
  - { id: 20, class: vr }
  - { id: 21, class: vr }
  - { id: 22, class: vr }
  - { id: 23, class: vr }
  - { id: 24, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%6' }
  - { reg: '$x12', virtual-reg: '%8' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: redusum_loop
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT:   liveins: $x10, $x12
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 4, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $noreg, 0, 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[LUI:%[0-9]+]]:gpr = LUI 1
  ; CHECK-NEXT:   [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], -2048
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.vector.body:
  ; CHECK-NEXT:   successors: %bb.2(0x04000000), %bb.1(0x7c000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 undef $noreg, [[COPY1]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.lsr.iv12, align 4)
  ; CHECK-NEXT:   [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE32_V_M1_]], [[PseudoVMV_V_I_M1_]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[ADDIW:%[0-9]+]]:gpr = nsw ADDI [[ADDIW]], -4
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = ADDI [[COPY1]], 16
  ; CHECK-NEXT:   BNE [[ADDIW]], $x0, %bb.1
  ; CHECK-NEXT:   PseudoBR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.middle.block:
  ; CHECK-NEXT:   [[PseudoVMV_S_X:%[0-9]+]]:vr = PseudoVMV_S_X undef $noreg, $x0, 1, 5 /* e32 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, [[PseudoVMV_V_I_M1_]], [[PseudoVMV_S_X]], 4, 5 /* e32 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 1, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   PseudoVSE32_V_M1 [[PseudoVREDSUM_VS_M1_E8_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res)
  ; CHECK-NEXT:   PseudoRET
  bb.0.entry:
    liveins: $x10, $x12

    %8:gpr = COPY $x12
    %6:gpr = COPY $x10
    %11:vr = PseudoVMV_V_I_M1 undef $noreg, 0, 4, 5, 0
    %12:vr = COPY %11
    %10:vr = COPY %12
    %13:gpr = LUI 1
    %9:gpr = ADDIW killed %13, -2048

  bb.1.vector.body:
    successors: %bb.2(0x04000000), %bb.1(0x7c000000)

    %0:gpr = PHI %6, %bb.0, %5, %bb.1
    %1:gpr = PHI %9, %bb.0, %4, %bb.1
    %2:vr = PHI %10, %bb.0, %16, %bb.1
    %14:vr = PseudoVLE32_V_M1 undef $noreg, %0, 4, 5, 0 :: (load (s128) from %ir.lsr.iv12, align 4)
    %16:vr = PseudoVADD_VV_M1 undef $noreg, killed %14, %2, 4, 5, 0
    %4:gpr = nsw ADDI %1, -4
    %5:gpr = ADDI %0, 16
    %18:gpr = COPY $x0
    BNE %4, %18, %bb.1
    PseudoBR %bb.2

  bb.2.middle.block:
    %19:gpr = COPY $x0
    %20:vr = PseudoVMV_S_X undef $noreg, %19, 1, 5
    %23:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, %16, killed %20, 4, 5, 1
    PseudoVSE32_V_M1 killed %23, %8, 1, 5 :: (store (s32) into %ir.res)
    PseudoRET

...
801---
802name:            vsetvli_vluxei64_regression
803tracksRegLiveness: true
804body:             |
805  ; CHECK-LABEL: name: vsetvli_vluxei64_regression
806  ; CHECK: bb.0:
807  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
808  ; CHECK-NEXT:   liveins: $x10, $x11, $x12, $v0, $v1, $v2, $v3
809  ; CHECK-NEXT: {{  $}}
810  ; CHECK-NEXT:   %a:gpr = COPY $x10
811  ; CHECK-NEXT:   %b:gpr = COPY $x11
812  ; CHECK-NEXT:   %inaddr:gpr = COPY $x12
813  ; CHECK-NEXT:   %idxs:vr = COPY $v0
814  ; CHECK-NEXT:   %t1:vr = COPY $v1
815  ; CHECK-NEXT:   %t3:vr = COPY $v2
816  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vrnov0 = COPY $v3
817  ; CHECK-NEXT:   %t5:vrnov0 = COPY $v1
818  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
819  ; CHECK-NEXT:   %t6:vr = PseudoVMSEQ_VI_M1 %t1, 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype
820  ; CHECK-NEXT:   PseudoBR %bb.1
821  ; CHECK-NEXT: {{  $}}
822  ; CHECK-NEXT: bb.1:
823  ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
824  ; CHECK-NEXT: {{  $}}
825  ; CHECK-NEXT:   %mask:vr = PseudoVMANDN_MM_B64 %t6, %t3, -1, 0 /* e8 */, implicit $vl, implicit $vtype
826  ; CHECK-NEXT:   BEQ %a, $x0, %bb.3
827  ; CHECK-NEXT:   PseudoBR %bb.2
828  ; CHECK-NEXT: {{  $}}
829  ; CHECK-NEXT: bb.2:
830  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
831  ; CHECK-NEXT: {{  $}}
832  ; CHECK-NEXT:   $v0 = COPY %mask
833  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl
834  ; CHECK-NEXT:   early-clobber [[COPY]]:vrnov0 = PseudoVLUXEI64_V_M1_MF8_MASK %t5, %inaddr, %idxs, $v0, -1, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
835  ; CHECK-NEXT:   PseudoBR %bb.3
836  ; CHECK-NEXT: {{  $}}
837  ; CHECK-NEXT: bb.3:
838  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_1:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype
839  ; CHECK-NEXT:   $v0 = COPY %mask
840  ; CHECK-NEXT:   PseudoVSOXEI64_V_M1_MF8_MASK [[COPY]], %b, %idxs, $v0, -1, 3 /* e8 */, implicit $vl, implicit $vtype
841  ; CHECK-NEXT:   PseudoRET
842  bb.0:
843    successors: %bb.1
844    liveins: $x10, $x11, $x12, $v0, $v1, $v2, $v3
845
846    %a:gpr = COPY $x10
847    %b:gpr = COPY $x11
848    %inaddr:gpr = COPY $x12
849    %idxs:vr = COPY $v0
850    %t1:vr = COPY $v1
851    %t3:vr = COPY $v2
852    %t4:vr = COPY $v3
853    %t5:vrnov0 = COPY $v1
854    %t6:vr = PseudoVMSEQ_VI_M1 %t1, 0, -1, 6
855    PseudoBR %bb.1
856
857  bb.1:
858    successors: %bb.3, %bb.2
859
860    %mask:vr = PseudoVMANDN_MM_B64 %t6, %t3, -1, 0
861    %t2:gpr = COPY $x0
862    BEQ %a, %t2, %bb.3
863    PseudoBR %bb.2
864
865  bb.2:
866    successors: %bb.3
867
868    $v0 = COPY %mask
869    early-clobber %t0:vrnov0 = PseudoVLUXEI64_V_M1_MF8_MASK %t5, killed %inaddr, %idxs, $v0, -1, 3, 1
870    %ldval:vr = COPY %t0
871    PseudoBR %bb.3
872
873  bb.3:
874    %stval:vr = PHI %t4, %bb.1, %ldval, %bb.2
875    $v0 = COPY %mask
876    PseudoVSOXEI64_V_M1_MF8_MASK killed %stval, killed %b, %idxs, $v0, -1, 3
877    PseudoRET
878
879...
880---
881name:            if_in_loop
882tracksRegLiveness: true
883body:             |
884  ; CHECK-LABEL: name: if_in_loop
885  ; CHECK: bb.0:
886  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
887  ; CHECK-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15
888  ; CHECK-NEXT: {{  $}}
889  ; CHECK-NEXT:   %dst:gpr = COPY $x10
890  ; CHECK-NEXT:   %src:gpr = COPY $x11
891  ; CHECK-NEXT:   dead [[COPY:%[0-9]+]]:gpr = COPY $x12
892  ; CHECK-NEXT:   %tc:gpr = COPY $x13
893  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x14
894  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x15
895  ; CHECK-NEXT:   %vlenb:gpr = PseudoReadVLENB
896  ; CHECK-NEXT:   %inc:gpr = SRLI %vlenb, 3
897  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
898  ; CHECK-NEXT:   [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 undef $noreg, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
899  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x0
900  ; CHECK-NEXT:   PseudoBR %bb.1
901  ; CHECK-NEXT: {{  $}}
902  ; CHECK-NEXT: bb.1:
903  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
904  ; CHECK-NEXT: {{  $}}
905  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY2]], [[COPY3]]
906  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
907  ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 undef $noreg, [[PseudoVID_V_M1_]], [[ADD]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
908  ; CHECK-NEXT:   [[PseudoVMSLTU_VX_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VX_M1 [[PseudoVADD_VX_M1_]], [[COPY1]], -1, 6 /* e64 */, implicit $vl, implicit $vtype
909  ; CHECK-NEXT:   [[PseudoVCPOP_M_B64_:%[0-9]+]]:gpr = PseudoVCPOP_M_B64 [[PseudoVMSLTU_VX_M1_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype
910  ; CHECK-NEXT:   BEQ [[PseudoVCPOP_M_B64_]], $x0, %bb.3
911  ; CHECK-NEXT:   PseudoBR %bb.2
912  ; CHECK-NEXT: {{  $}}
913  ; CHECK-NEXT: bb.2:
914  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
915  ; CHECK-NEXT: {{  $}}
916  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:gpr = ADD %src, [[COPY3]]
917  ; CHECK-NEXT:   [[PseudoVLE8_V_MF8_:%[0-9]+]]:vrnov0 = PseudoVLE8_V_MF8 undef $noreg, [[ADD1]], -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
918  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
919  ; CHECK-NEXT:   [[PseudoVADD_VI_MF8_:%[0-9]+]]:vrnov0 = PseudoVADD_VI_MF8 undef $noreg, [[PseudoVLE8_V_MF8_]], 4, -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
920  ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:gpr = ADD %dst, [[COPY3]]
921  ; CHECK-NEXT:   PseudoVSE8_V_MF8 [[PseudoVADD_VI_MF8_]], [[ADD2]], -1, 3 /* e8 */, implicit $vl, implicit $vtype
922  ; CHECK-NEXT: {{  $}}
923  ; CHECK-NEXT: bb.3:
924  ; CHECK-NEXT:   successors: %bb.1(0x7c000000), %bb.4(0x04000000)
925  ; CHECK-NEXT: {{  $}}
926  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = ADD [[COPY3]], %inc
927  ; CHECK-NEXT:   BLTU [[COPY3]], %tc, %bb.1
928  ; CHECK-NEXT:   PseudoBR %bb.4
929  ; CHECK-NEXT: {{  $}}
930  ; CHECK-NEXT: bb.4:
931  ; CHECK-NEXT:   PseudoRET
932  bb.0:
933    successors: %bb.1(0x80000000)
934    liveins: $x10, $x11, $x12, $x13, $x14, $x15
935
936    %dst:gpr = COPY $x10
937    %src:gpr = COPY $x11
938    %48:gpr = COPY $x12
939    %tc:gpr = COPY $x13
940    %11:gpr = COPY $x14
941    %12:gpr = COPY $x15
942    %vlenb:gpr = PseudoReadVLENB
943    %inc:gpr = SRLI killed %vlenb, 3
944    %10:vr = PseudoVID_V_M1 undef $noreg, -1, 6, 0
945    %59:gpr = COPY $x0
946    PseudoBR %bb.1
947
948  bb.1:
949    successors: %bb.2(0x40000000), %bb.3(0x40000000)
950
951    %26:gpr = PHI %59, %bb.0, %28, %bb.3
952    %61:gpr = ADD %12, %26
953    %27:vr = PseudoVADD_VX_M1 undef $noreg, %10, killed %61, -1, 6, 0
954    %62:vr = PseudoVMSLTU_VX_M1 %27, %11, -1, 6
955    %63:gpr = PseudoVCPOP_M_B64 %62, -1, 0
956    %64:gpr = COPY $x0
957    BEQ killed %63, %64, %bb.3
958    PseudoBR %bb.2
959
960  bb.2:
961    successors: %bb.3(0x80000000)
962
963    %66:gpr = ADD %src, %26
964    %67:vrnov0 = PseudoVLE8_V_MF8 undef $noreg, killed %66, -1, 3, 0
965    %76:vrnov0 = PseudoVADD_VI_MF8 undef $noreg, %67, 4, -1, 3, 0
966    %77:gpr = ADD %dst, %26
967    PseudoVSE8_V_MF8 killed %76, killed %77, -1, 3
968
969  bb.3:
970    successors: %bb.1(0x7c000000), %bb.4(0x04000000)
971
972    %28:gpr = ADD %26, %inc
973    BLTU %28, %tc, %bb.1
974    PseudoBR %bb.4
975
976  bb.4:
977    PseudoRET
978
979...
980---
981name: pre_undemanded_vl
982body: |
983  ; CHECK-LABEL: name: pre_undemanded_vl
984  ; CHECK: bb.0:
985  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
986  ; CHECK-NEXT: {{  $}}
987  ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 1, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
988  ; CHECK-NEXT:   PseudoBR %bb.1
989  ; CHECK-NEXT: {{  $}}
990  ; CHECK-NEXT: bb.1:
991  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
992  ; CHECK-NEXT: {{  $}}
993  ; CHECK-NEXT:   dead %x:gpr = PseudoVMV_X_S undef $noreg, 6 /* e64 */, implicit $vtype
994  ; CHECK-NEXT:   PseudoBR %bb.1
995  bb.0:
996    PseudoBR %bb.1
997  bb.1:
998    %x:gpr = PseudoVMV_X_S undef $noreg, 6
999    PseudoBR %bb.1
1000...
1001---
1002name: clobberred_forwarded_avl
1003tracksRegLiveness: true
1004body: |
1005  ; CHECK-LABEL: name: clobberred_forwarded_avl
1006  ; CHECK: bb.0:
1007  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
1008  ; CHECK-NEXT:   liveins: $x10, $v8m2
1009  ; CHECK-NEXT: {{  $}}
1010  ; CHECK-NEXT:   %avl:gprnox0 = COPY $x10
1011  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY %avl
1012  ; CHECK-NEXT:   dead %outvl:gprnox0 = PseudoVSETVLI %avl, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
1013  ; CHECK-NEXT: {{  $}}
1014  ; CHECK-NEXT: bb.1:
1015  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
1016  ; CHECK-NEXT:   liveins: $v8m2
1017  ; CHECK-NEXT: {{  $}}
1018  ; CHECK-NEXT:   dead %avl:gprnox0 = ADDI %avl, 1
1019  ; CHECK-NEXT: {{  $}}
1020  ; CHECK-NEXT: bb.2:
1021  ; CHECK-NEXT:   liveins: $v8m2
1022  ; CHECK-NEXT: {{  $}}
1023  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
1024  ; CHECK-NEXT:   renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, renamable $v8m2, renamable $v8m2, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
1025  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
1026  ; CHECK-NEXT:   renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, renamable $v8m2, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
1027  ; CHECK-NEXT:   PseudoRET implicit $v8m2
1028  bb.0:
1029    liveins: $x10, $v8m2
1030    %avl:gprnox0 = COPY $x10
1031    %outvl:gprnox0 = PseudoVSETVLI %avl:gprnox0, 209, implicit-def dead $vl, implicit-def dead $vtype
1032
1033  bb.1:
1034    liveins: $v8m2
1035    %avl:gprnox0 = ADDI %avl:gprnox0, 1
1036
1037  bb.2:
1038    liveins: $v8m2
1039    renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, renamable $v8m2, renamable $v8m2, -1, 5, 0
1040    renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, killed renamable $v8m2, %outvl:gprnox0, 5, 0
1041    PseudoRET implicit $v8m2
1042...
1043---
1044name: clobberred_forwarded_phi_avl
1045tracksRegLiveness: true
1046body: |
1047  ; CHECK-LABEL: name: clobberred_forwarded_phi_avl
1048  ; CHECK: bb.0:
1049  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
1050  ; CHECK-NEXT:   liveins: $x10, $x11, $v8m2
1051  ; CHECK-NEXT: {{  $}}
1052  ; CHECK-NEXT:   %v:vrm2 = COPY $v8m2
1053  ; CHECK-NEXT:   [[ADDI:%[0-9]+]]:gprnox0 = ADDI $x0, 1
1054  ; CHECK-NEXT:   %x:gpr = COPY $x10
1055  ; CHECK-NEXT:   %y:gpr = COPY $x11
1056  ; CHECK-NEXT:   BEQ %x, %y, %bb.2
1057  ; CHECK-NEXT: {{  $}}
1058  ; CHECK-NEXT: bb.1:
1059  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
1060  ; CHECK-NEXT: {{  $}}
1061  ; CHECK-NEXT:   [[ADDI:%[0-9]+]]:gprnox0 = ADDI $x0, 2
1062  ; CHECK-NEXT: {{  $}}
1063  ; CHECK-NEXT: bb.2:
1064  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
1065  ; CHECK-NEXT: {{  $}}
1066  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY [[ADDI]]
1067  ; CHECK-NEXT:   dead %outvl:gprnox0 = PseudoVSETVLI [[ADDI]], 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
1068  ; CHECK-NEXT: {{  $}}
1069  ; CHECK-NEXT: bb.3:
1070  ; CHECK-NEXT:   successors: %bb.4(0x80000000)
1071  ; CHECK-NEXT: {{  $}}
1072  ; CHECK-NEXT:   dead [[ADDI:%[0-9]+]]:gprnox0 = ADDI [[ADDI]], 1
1073  ; CHECK-NEXT: {{  $}}
1074  ; CHECK-NEXT: bb.4:
1075  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
1076  ; CHECK-NEXT:   renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, %v, %v, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
1077  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
1078  ; CHECK-NEXT:   renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, %v, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
1079  ; CHECK-NEXT:   PseudoRET implicit $v8m2
1080  bb.0:
1081    liveins: $x10, $x11, $v8m2
1082    %v:vrm2 = COPY $v8m2
1083    %a:gpr = ADDI $x0, 1
1084    %x:gpr = COPY $x10
1085    %y:gpr = COPY $x11
1086    BEQ %x, %y, %bb.2
1087
1088  bb.1:
1089    %b:gpr = ADDI $x0, 2
1090
1091  bb.2:
1092    %avl:gprnox0 = PHI %a, %bb.0, %b, %bb.1
1093    %outvl:gprnox0 = PseudoVSETVLI %avl:gprnox0, 209, implicit-def dead $vl, implicit-def dead $vtype
1094
1095  bb.3:
1096    %avl:gprnox0 = ADDI %avl:gprnox0, 1
1097
1098  bb.4:
1099    renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, %v, %v, -1, 5, 0
1100    renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, killed %v, %outvl:gprnox0, 5, 0
1101    PseudoRET implicit $v8m2
1102