xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir (revision 903f6fceb86e68b0dbc11b13f808fc00a471e595)
1# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2# RUN: llc %s -o - -mtriple=riscv64 -mattr=v -run-pass=liveintervals,riscv-insert-vsetvli \
3# RUN:     -verify-machineinstrs | FileCheck %s
4
5--- |
  ; Reference LLVM IR for the MIR functions below. In a .mir test this section
  ; only supplies the function symbols, types and attributes that the MIR
  ; documents attach to; the pass under test runs on the MIR bodies, not here.
6  source_filename = "vsetvli-insert.ll"
7  target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
8  target triple = "riscv64"
9
10  define <vscale x 1 x i64> @add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) #0 {
11  entry:
12    %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
13    ret <vscale x 1 x i64> %a
14  }
15
16  define <vscale x 1 x i64> @load_add(ptr %0, <vscale x 1 x i64> %1, i64 %2) #0 {
17  entry:
18    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, ptr %0, i64 %2)
19    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
20    ret <vscale x 1 x i64> %b
21  }
22
23  define <vscale x 1 x i64> @load_zext(ptr %0, i64 %1) #0 {
24  entry:
25    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr %0, i64 %1)
26    %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %1)
27    ret <vscale x 1 x i64> %b
28  }
29
30  declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>) #1
31
32  define i64 @vmv_x_s(<vscale x 1 x i64> %0) #0 {
33  entry:
34    %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> %0)
35    ret i64 %a
36  }
37
38  define void @add_v2i64(ptr %x, ptr %y) #0 {
39    %a = load <2 x i64>, ptr %x, align 16
40    %b = load <2 x i64>, ptr %y, align 16
41    %c = add <2 x i64> %a, %b
42    store <2 x i64> %c, ptr %x, align 16
43    ret void
44  }
45
46  declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) #2
47
48  define i64 @vreduce_add_v2i64(ptr %x) #0 {
49    %v = load <2 x i64>, ptr %x, align 16
50    %red = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v)
51    ret i64 %red
52  }
53
54  declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3
55
56  define <vscale x 1 x i64> @vsetvli_add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %avl) #0 {
57  entry:
58    %a = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0)
59    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %a)
60    ret <vscale x 1 x i64> %b
61  }
62
63  define <vscale x 1 x i64> @load_add_inlineasm(ptr %0, <vscale x 1 x i64> %1, i64 %2) #0 {
64  entry:
65    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, ptr %0, i64 %2)
66    call void asm sideeffect "", ""()
67    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
68    ret <vscale x 1 x i64> %b
69  }
70
  ; The remaining definitions are empty stubs: those MIR test cases are written
  ; directly in MIR and only need a matching IR symbol to attach to.
71  define void @vmv_v_i_different_lmuls() {
72    ret void
73  }
74
75  define void @pre_same_sewlmul_ratio() {
76    ret void
77  }
78
79  define void @postpass_modify_vl() {
80    ret void
81  }
82
83  define void @coalesce_dead_avl_addi() {
84    ret void
85  }
86
87  define void @coalesce_dead_avl_nonvolatile_load() {
88    ret void
89  }
90
91  define void @coalesce_dead_avl_volatile_load() {
92    ret void
93  }
94
95  define void @coalesce_shrink_removed_vsetvlis_uses() {
96    ret void
97  }
98
99  declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
100
101  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, ptr nocapture, i64) #4
102
103  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, ptr nocapture, i64) #4
104
105  declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1
106
107  attributes #0 = { "target-features"="+v" }
108  attributes #1 = { nounwind readnone }
109  attributes #2 = { nofree nosync nounwind readnone willreturn }
110  attributes #3 = { nounwind }
111  attributes #4 = { nounwind readonly }
112
113...
114---
# AVL held in a register: the pass should insert a single PseudoVSETVLI
# (vtype imm 216 = e64, m1, ta, ma) before the vadd, writing $x0 because the
# VL result itself is unused.
115name:            add
116alignment:       4
117tracksRegLiveness: true
118registers:
119  - { id: 0, class: vr }
120  - { id: 1, class: vr }
121  - { id: 2, class: gprnox0 }
122  - { id: 3, class: vr }
123liveins:
124  - { reg: '$v8', virtual-reg: '%0' }
125  - { reg: '$v9', virtual-reg: '%1' }
126  - { reg: '$x10', virtual-reg: '%2' }
127frameInfo:
128  maxAlignment:    1
129machineFunctionInfo: {}
130body:             |
131  bb.0.entry:
132    liveins: $v8, $v9, $x10
133
134    ; CHECK-LABEL: name: add
135    ; CHECK: liveins: $v8, $v9, $x10
136    ; CHECK-NEXT: {{  $}}
137    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
138    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
139    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
140    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
141    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
142    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
143    ; CHECK-NEXT: PseudoRET implicit $v8
144    %2:gprnox0 = COPY $x10
145    %1:vr = COPY $v9
146    %0:vr = COPY $v8
147    %3:vr = PseudoVADD_VV_M1 undef $noreg, %0, %1, %2, 6, 0
148    $v8 = COPY %3
149    PseudoRET implicit $v8
150
151...
152---
# Load followed by add with the same AVL and e64/m1 config: a single inserted
# vsetvli must cover both instructions (no second vsetvli before the add).
153name:            load_add
154alignment:       4
155tracksRegLiveness: true
156registers:
157  - { id: 0, class: gpr }
158  - { id: 1, class: vr }
159  - { id: 2, class: gprnox0 }
160  - { id: 3, class: vr }
161  - { id: 4, class: vr }
162liveins:
163  - { reg: '$x10', virtual-reg: '%0' }
164  - { reg: '$v8', virtual-reg: '%1' }
165  - { reg: '$x11', virtual-reg: '%2' }
166frameInfo:
167  maxAlignment:    1
168machineFunctionInfo: {}
169body:             |
170  bb.0.entry:
171    liveins: $x10, $v8, $x11

172
173    ; CHECK-LABEL: name: load_add
174    ; CHECK: liveins: $x10, $v8, $x11
175    ; CHECK-NEXT: {{  $}}
176    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
177    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
178    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
179    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
180    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
181    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
182    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
183    ; CHECK-NEXT: PseudoRET implicit $v8
184    %2:gprnox0 = COPY $x11
185    %1:vr = COPY $v8
186    %0:gpr = COPY $x10
187    %3:vr = PseudoVLE64_V_M1 undef $noreg, %0, %2, 6, 0
188    %4:vr = PseudoVADD_VV_M1 undef $noreg, killed %3, %1, %2, 6, 0
189    $v8 = COPY %4
190    PseudoRET implicit $v8
191
192...
193---
# An e32 load feeding an e64 zext: the CHECK lines expect a single e64/m1
# vsetvli to cover both (the load's EEW comes from the VLE32 opcode, and
# e32/mf2 has the same SEW/LMUL ratio as e64/m1).
194name:            load_zext
195alignment:       4
196tracksRegLiveness: true
197registers:
198  - { id: 0, class: gpr }
199  - { id: 1, class: gprnox0 }
200  - { id: 2, class: vr }
201  - { id: 3, class: vr }
202liveins:
203  - { reg: '$x10', virtual-reg: '%0' }
204  - { reg: '$x11', virtual-reg: '%1' }
205frameInfo:
206  maxAlignment:    1
207machineFunctionInfo: {}
208body:             |
209  bb.0.entry:
210    liveins: $x10, $x11

211
212    ; CHECK-LABEL: name: load_zext
213    ; CHECK: liveins: $x10, $x11
214    ; CHECK-NEXT: {{  $}}
215    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
216    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
217    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
218    ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 undef $noreg, [[COPY1]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
219    ; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 undef $noreg, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
220    ; CHECK-NEXT: $v8 = COPY %3
221    ; CHECK-NEXT: PseudoRET implicit $v8
222    %1:gprnox0 = COPY $x11
223    %0:gpr = COPY $x10
224    %2:vr = PseudoVLE32_V_MF2 undef $noreg, %0, %1, 5, 0
225    early-clobber %3:vr = PseudoVZEXT_VF2_M1 undef $noreg, killed %2, %1, 6, 0
226    $v8 = COPY %3
227    PseudoRET implicit $v8
228
229...
230---
# vmv.x.s depends only on vtype (SEW), not on vl: a VSETIVLI with AVL 1 is
# inserted and the pseudo carries only an implicit $vtype use.
231name:            vmv_x_s
232alignment:       4
233tracksRegLiveness: true
234registers:
235  - { id: 0, class: vr }
236  - { id: 1, class: gpr }
237liveins:
238  - { reg: '$v8', virtual-reg: '%0' }
239frameInfo:
240  maxAlignment:    1
241machineFunctionInfo: {}
242body:             |
243  bb.0.entry:
244    liveins: $v8

245
246    ; CHECK-LABEL: name: vmv_x_s
247    ; CHECK: liveins: $v8
248    ; CHECK-NEXT: {{  $}}
249    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
250    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
251    ; CHECK-NEXT: [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S [[COPY]], 6 /* e64 */, implicit $vtype
252    ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S]]
253    ; CHECK-NEXT: PseudoRET implicit $x10
254    %0:vr = COPY $v8
255    %1:gpr = PseudoVMV_X_S %0, 6
256    $x10 = COPY %1
257    PseudoRET implicit $x10
258
259...
260---
# Fixed-length vectors (v2i64) with immediate AVL 2: one VSETIVLI 2 at
# e64/m1 must cover both loads, the add, and the store.
261name:            add_v2i64
262alignment:       4
263tracksRegLiveness: true
264registers:
265  - { id: 0, class: gpr }
266  - { id: 1, class: gpr }
267  - { id: 2, class: vr }
268  - { id: 3, class: vr }
269  - { id: 4, class: vr }
270liveins:
271  - { reg: '$x10', virtual-reg: '%0' }
272  - { reg: '$x11', virtual-reg: '%1' }
273frameInfo:
274  maxAlignment:    1
275machineFunctionInfo: {}
276body:             |
277  bb.0 (%ir-block.0):
278    liveins: $x10, $x11

279
280    ; CHECK-LABEL: name: add_v2i64
281    ; CHECK: liveins: $x10, $x11
282    ; CHECK-NEXT: {{  $}}
283    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11
284    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
285    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
286    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
287    ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.y)
288    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[PseudoVLE64_V_M1_1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
289    ; CHECK-NEXT: PseudoVSE64_V_M1 [[PseudoVADD_VV_M1_]], [[COPY1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (store (s128) into %ir.x)
290    ; CHECK-NEXT: PseudoRET
291    %1:gpr = COPY $x11
292    %0:gpr = COPY $x10
293    %2:vr = PseudoVLE64_V_M1 undef $noreg, %0, 2, 6, 0 :: (load (s128) from %ir.x)
294    %3:vr = PseudoVLE64_V_M1 undef $noreg, %1, 2, 6, 0 :: (load (s128) from %ir.y)
295    %4:vr = PseudoVADD_VV_M1 undef $noreg, killed %2, killed %3, 2, 6, 0
296    PseudoVSE64_V_M1 killed %4, %0, 2, 6 :: (store (s128) into %ir.x)
297    PseudoRET
298
299...
300---
# Reduction: the identity splat (vmv.v.i with AVL -1, i.e. VLMAX) gets a
# vl-max PseudoVSETVLIX0, then vl must be switched back to 2 with a second
# VSETIVLI before the vredsum. The final vmv.x.s reuses the live vtype.
301name:            vreduce_add_v2i64
302alignment:       4
303tracksRegLiveness: true
304registers:
305  - { id: 0, class: gpr }
306  - { id: 1, class: vr }
307  - { id: 2, class: vr }
308  - { id: 3, class: vr }
309  - { id: 4, class: vr }
310  - { id: 5, class: gpr }
311liveins:
312  - { reg: '$x10', virtual-reg: '%0' }
313frameInfo:
314  maxAlignment:    1
315machineFunctionInfo: {}
316body:             |
317  bb.0 (%ir-block.0):
318    liveins: $x10

319
320    ; CHECK-LABEL: name: vreduce_add_v2i64
321    ; CHECK: liveins: $x10
322    ; CHECK-NEXT: {{  $}}
323    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
324    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
325    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
326    ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
327    ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $noreg, 0, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
328    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
329    ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, [[PseudoVLE64_V_M1_]], [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
330    ; CHECK-NEXT: [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S [[PseudoVREDSUM_VS_M1_E8_]], 6 /* e64 */, implicit $vtype
331    ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S]]
332    ; CHECK-NEXT: PseudoRET implicit $x10
333    %0:gpr = COPY $x10
334    %1:vr = PseudoVLE64_V_M1 undef $noreg, %0, 2, 6, 0 :: (load (s128) from %ir.x)
335    %2:vr = PseudoVMV_V_I_M1 undef $noreg, 0, -1, 6, 0
336    %3:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, killed %1, killed %2, 2, 6, 1
337    %5:gpr = PseudoVMV_X_S killed %3, 6
338    $x10 = COPY %5
339    PseudoRET implicit $x10
340
341...
342---
# An explicit user vsetvli (vtype imm 88 = e64, m1, ta, mu) already defines
# vl/vtype; no extra vsetvli is inserted and the add's AVL operand collapses
# to $noreg (use the current vl). The vsetvli's GPR result becomes dead.
343name:            vsetvli_add
344alignment:       4
345tracksRegLiveness: true
346registers:
347  - { id: 0, class: vr }
348  - { id: 1, class: vr }
349  - { id: 2, class: gprnox0 }
350  - { id: 3, class: gprnox0 }
351  - { id: 4, class: vr }
352liveins:
353  - { reg: '$v8', virtual-reg: '%0' }
354  - { reg: '$v9', virtual-reg: '%1' }
355  - { reg: '$x10', virtual-reg: '%2' }
356frameInfo:
357  maxAlignment:    1
358machineFunctionInfo: {}
359body:             |
360  bb.0.entry:
361    liveins: $v8, $v9, $x10

362
363    ; CHECK-LABEL: name: vsetvli_add
364    ; CHECK: liveins: $v8, $v9, $x10
365    ; CHECK-NEXT: {{  $}}
366    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
367    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
368    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
369    ; CHECK-NEXT: dead [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
370    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
371    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
372    ; CHECK-NEXT: PseudoRET implicit $v8
373    %2:gprnox0 = COPY $x10
374    %1:vr = COPY $v9
375    %0:vr = COPY $v8
376    %3:gprnox0 = PseudoVSETVLI %2, 88, implicit-def dead $vl, implicit-def dead $vtype
377    %4:vr = PseudoVADD_VV_M1 undef $noreg, %0, %1, killed %3, 6, 0
378    $v8 = COPY %4
379    PseudoRET implicit $v8
380
381...
382---
# Same as load_add, but an INLINEASM sits between the load and the add: the
# pass must treat the asm as clobbering vl/vtype state and re-insert the
# (otherwise redundant) vsetvli after it.
383name:            load_add_inlineasm
384alignment:       4
385tracksRegLiveness: true
386registers:
387  - { id: 0, class: gpr }
388  - { id: 1, class: vr }
389  - { id: 2, class: gprnox0 }
390  - { id: 3, class: vr }
391  - { id: 4, class: vr }
392liveins:
393  - { reg: '$x10', virtual-reg: '%0' }
394  - { reg: '$v8', virtual-reg: '%1' }
395  - { reg: '$x11', virtual-reg: '%2' }
396frameInfo:
397  maxAlignment:    1
398machineFunctionInfo: {}
399body:             |
400  bb.0.entry:
401    liveins: $x10, $v8, $x11

402
403    ; CHECK-LABEL: name: load_add_inlineasm
404    ; CHECK: liveins: $x10, $v8, $x11
405    ; CHECK-NEXT: {{  $}}
406    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
407    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
408    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
409    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
410    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
411    ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
412    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
413    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
414    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
415    ; CHECK-NEXT: PseudoRET implicit $v8
416    %2:gprnox0 = COPY $x11
417    %1:vr = COPY $v8
418    %0:gpr = COPY $x10
419    %3:vr = PseudoVLE64_V_M1 undef $noreg, %0, %2, 6, 0
420    INLINEASM &"", 1 /* sideeffect attdialect */
421    %4:vr = PseudoVADD_VV_M1 undef $noreg, killed %3, %1, %2, 6, 0
422    $v8 = COPY %4
423    PseudoRET implicit $v8
424
425...
426---
# vid at e64/m2 followed by vmv.v.i at e8/mf4: both configs share the same
# SEW/LMUL ratio, so the second transition may use the vl-preserving
# PseudoVSETVLIX0 form (note the extra implicit $vl use on it).
427name:            vmv_v_i_different_lmuls
428tracksRegLiveness: true
429body:             |
430  bb.0:
431    liveins: $x10, $v8, $x11

432
433    ; CHECK-LABEL: name: vmv_v_i_different_lmuls
434    ; CHECK: liveins: $x10, $v8, $x11
435    ; CHECK-NEXT: {{  $}}
436    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 217 /* e64, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
437    ; CHECK-NEXT: dead [[PseudoVID_V_M2_:%[0-9]+]]:vrm2 = PseudoVID_V_M2 undef $noreg, 4, 6 /* e64 */, 3 /* ta, ma */, implicit $vl, implicit $vtype
438    ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 198 /* e8, mf4, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
439    ; CHECK-NEXT: dead [[PseudoVMV_V_I_MF4_:%[0-9]+]]:vr = PseudoVMV_V_I_MF4 undef $noreg, 0, 4, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
440    ; CHECK-NEXT: PseudoRET
441    %0:vrm2 = PseudoVID_V_M2 undef $noreg, 4, 6, 3
442    %4:vr = PseudoVMV_V_I_MF4 undef $noreg, 0, 4, 3, 0
443    PseudoRET
444...
445---
446# make sure we don't try to perform PRE when one of the blocks is sew/lmul ratio
447# only
# bb.2's exit state only agrees on the SEW/LMUL ratio (bb.0 sets e32/mf2,
# bb.1 sets e64/m1), so bb.4 must still carry its own non-dead VSETIVLI
# whose vl result feeds the ratio-only vmv.x.s use.
448name: pre_same_sewlmul_ratio
449tracksRegLiveness: true
450body:             |
451  ; CHECK-LABEL: name: pre_same_sewlmul_ratio
452  ; CHECK: bb.0:
453  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
454  ; CHECK-NEXT:   liveins: $x10
455  ; CHECK-NEXT: {{  $}}
456  ; CHECK-NEXT:   %cond:gpr = COPY $x10
457  ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 2, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
458  ; CHECK-NEXT:   dead [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vr = PseudoVMV_V_I_MF2 undef $noreg, 1, 2, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
459  ; CHECK-NEXT:   BEQ %cond, $x0, %bb.2
460  ; CHECK-NEXT: {{  $}}
461  ; CHECK-NEXT: bb.1:
462  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
463  ; CHECK-NEXT: {{  $}}
464  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
465  ; CHECK-NEXT:   dead [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $noreg, 1, 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
466  ; CHECK-NEXT: {{  $}}
467  ; CHECK-NEXT: bb.2:
468  ; CHECK-NEXT:   successors: %bb.4(0x40000000), %bb.3(0x40000000)
469  ; CHECK-NEXT: {{  $}}
470  ; CHECK-NEXT:   BEQ %cond, $x0, %bb.4
471  ; CHECK-NEXT: {{  $}}
472  ; CHECK-NEXT: bb.3:
473  ; CHECK-NEXT:   successors: %bb.4(0x80000000)
474  ; CHECK-NEXT: {{  $}}
475  ; CHECK-NEXT:   PseudoCALL $noreg, csr_ilp32_lp64
476  ; CHECK-NEXT: {{  $}}
477  ; CHECK-NEXT: bb.4:
478  ; CHECK-NEXT:   $x0 = PseudoVSETIVLI 2, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
479  ; CHECK-NEXT:   dead [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S undef $noreg, 5 /* e32 */, implicit $vtype
480  ; CHECK-NEXT:   dead [[PseudoVMV_V_I_MF2_1:%[0-9]+]]:vr = PseudoVMV_V_I_MF2 undef $noreg, 1, 2, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
481  ; CHECK-NEXT:   PseudoRET
482  bb.0:
483    liveins: $x10
484    %cond:gpr = COPY $x10
485    %1:vr = PseudoVMV_V_I_MF2 undef $noreg, 1, 2, 5, 0
486    BEQ %cond, $x0, %bb.2
487  bb.1:
488    %2:vr = PseudoVMV_V_I_M1 undef $noreg, 1, 2, 6, 0
489  bb.2: ; the exit info here should have sew/lmul ratio only
490    BEQ %cond, $x0, %bb.4
491  bb.3:
492    PseudoCALL $noreg, csr_ilp32_lp64
493  bb.4: ; this block will have PRE attempted on it
494    %4:gpr = PseudoVMV_X_S undef $noreg, 5
495    %5:vr = PseudoVMV_V_I_MF2 undef $noreg, 1, 2, 5, 0
496    PseudoRET
497...
498---
# The COPY that reads $vtype and the direct write to $vl between the two
# identical VSETIVLIs must prevent the post-pass from coalescing them:
# both vsetivlis survive in the output.
499name:            postpass_modify_vl
500tracksRegLiveness: true
501body:             |
502  bb.0:
503    liveins: $x1
504    ; CHECK-LABEL: name: postpass_modify_vl
505    ; CHECK: liveins: $x1
506    ; CHECK-NEXT: {{  $}}
507    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 3, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
508    ; CHECK-NEXT: dead [[COPY:%[0-9]+]]:gpr = COPY $vtype
509    ; CHECK-NEXT: $vl = COPY $x1
510    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 3, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
511    ; CHECK-NEXT: dead [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, undef $noreg, undef $noreg, 3, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
512    ; CHECK-NEXT: PseudoRET
513    dead $x0 = PseudoVSETIVLI 3, 216, implicit-def $vl, implicit-def $vtype
514    %1:gpr = COPY $vtype
515    $vl = COPY $x1
516    dead $x0 = PseudoVSETIVLI 3, 216, implicit-def $vl, implicit-def $vtype
517    %4:vr = PseudoVADD_VV_M1 undef $noreg, undef $noreg, undef $noreg, 3, 6, 0
518    PseudoRET
519...
520---
# Coalescing the two vsetvlis kills the use of %avl; the ADDI that defined it
# has no side effects and is deleted along with the first vsetvli.
521name: coalesce_dead_avl_addi
522tracksRegLiveness: true
523body:             |
524  bb.0:
525    ; CHECK-LABEL: name: coalesce_dead_avl_addi
526    ; CHECK: $x0 = PseudoVSETIVLI 3, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
527    ; CHECK-NEXT: dead %x:gpr = PseudoVMV_X_S $noreg, 6 /* e64 */, implicit $vtype
528    ; CHECK-NEXT: $v0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
529    ; CHECK-NEXT: PseudoRET
530    %avl:gprnox0 = ADDI $x0, 42
531    dead $x0 = PseudoVSETVLI killed %avl, 216, implicit-def $vl, implicit-def $vtype
532    %x:gpr = PseudoVMV_X_S $noreg, 6
533    dead $x0 = PseudoVSETIVLI 3, 216, implicit-def $vl, implicit-def $vtype
534    $v0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6, 0
535    PseudoRET
536...
537---
# Like coalesce_dead_avl_addi, but the dead AVL comes from a non-volatile
# load: the LW is kept (its result just becomes dead) even though the vsetvli
# that used it is removed.
538name: coalesce_dead_avl_nonvolatile_load
539tracksRegLiveness: true
540body:             |
541  bb.0:
542    liveins: $x1
543    ; CHECK-LABEL: name: coalesce_dead_avl_nonvolatile_load
544    ; CHECK: liveins: $x1
545    ; CHECK-NEXT: {{  $}}
546    ; CHECK-NEXT: %ptr:gpr = COPY $x1
547    ; CHECK-NEXT: dead %avl:gprnox0 = LW %ptr, 0 :: (dereferenceable load (s32))
548    ; CHECK-NEXT: $x0 = PseudoVSETIVLI 3, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
549    ; CHECK-NEXT: dead %x:gpr = PseudoVMV_X_S $noreg, 6 /* e64 */, implicit $vtype
550    ; CHECK-NEXT: $v0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
551    ; CHECK-NEXT: PseudoRET
552    %ptr:gpr = COPY $x1
553    %avl:gprnox0 = LW killed %ptr, 0 :: (dereferenceable load (s32))
554    dead $x0 = PseudoVSETVLI killed %avl, 216, implicit-def $vl, implicit-def $vtype
555    %x:gpr = PseudoVMV_X_S $noreg, 6
556    dead $x0 = PseudoVSETIVLI 3, 216, implicit-def $vl, implicit-def $vtype
557    $v0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6, 0
558    PseudoRET
559...
560---
# As above with a volatile load feeding the dead AVL: the volatile LW must
# never be deleted; it remains with a dead result.
561name: coalesce_dead_avl_volatile_load
562tracksRegLiveness: true
563body:             |
564  bb.0:
565    liveins: $x1
566    ; CHECK-LABEL: name: coalesce_dead_avl_volatile_load
567    ; CHECK: liveins: $x1
568    ; CHECK-NEXT: {{  $}}
569    ; CHECK-NEXT: %ptr:gpr = COPY $x1
570    ; CHECK-NEXT: dead %avl:gprnox0 = LW %ptr, 0 :: (volatile dereferenceable load (s32))
571    ; CHECK-NEXT: $x0 = PseudoVSETIVLI 3, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
572    ; CHECK-NEXT: dead %x:gpr = PseudoVMV_X_S $noreg, 6 /* e64 */, implicit $vtype
573    ; CHECK-NEXT: $v0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
574    ; CHECK-NEXT: PseudoRET
575    %ptr:gpr = COPY $x1
576    %avl:gprnox0 = LW killed %ptr, 0 :: (volatile dereferenceable load (s32))
577    dead $x0 = PseudoVSETVLI killed %avl, 216, implicit-def $vl, implicit-def $vtype
578    %x:gpr = PseudoVMV_X_S $noreg, 6
579    dead $x0 = PseudoVSETIVLI 3, 216, implicit-def $vl, implicit-def $vtype
580    $v0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6, 0
581    PseudoRET
582...
583---
# Deleting the first (redundant) vsetvli removes the only use of %avl1; the
# pass must shrink %avl1's live range so only %avl2 and its vsetvli survive.
584name: coalesce_shrink_removed_vsetvlis_uses
585tracksRegLiveness: true
586body: |
587  bb.0:
588    liveins: $x10, $v8
589    ; CHECK-LABEL: name: coalesce_shrink_removed_vsetvlis_uses
590    ; CHECK: liveins: $x10, $v8
591    ; CHECK-NEXT: {{  $}}
592    ; CHECK-NEXT: %avl2:gprnox0 = ADDI $x0, 2
593    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI %avl2, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
594    ; CHECK-NEXT: %x:gpr = COPY $x10
595    ; CHECK-NEXT: renamable $v8 = PseudoVMV_S_X undef renamable $v8, %x, 1, 5 /* e32 */, implicit $vl, implicit $vtype
596    ; CHECK-NEXT: PseudoRET implicit $v8
597    %avl1:gprnox0 = ADDI $x0, 1
598    dead $x0 = PseudoVSETVLI %avl1:gprnox0, 209, implicit-def dead $vl, implicit-def dead $vtype
599    %avl2:gprnox0 = ADDI $x0, 2
600    dead $x0 = PseudoVSETVLI %avl2:gprnox0, 209, implicit-def dead $vl, implicit-def dead $vtype
601    %x:gpr = COPY $x10
602    renamable $v8 = PseudoVMV_S_X undef renamable $v8, killed renamable %x, 1, 5
603    PseudoRET implicit $v8
604