; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV32,ZFMIN %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV64,ZFMIN %s
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV32,NOZFMIN %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV64,NOZFMIN %s

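; Each test spills a vector to the stack (the inline asm clobbers all of
; v0-v31 to force the spill), then checks that an extractelement of element 0
; is folded into a single scalar load from the spill slot rather than a full
; vector reload. On RV32, an i64 element cannot be read with one scalar load,
; so the vector is reloaded and split with vsrl instead.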
define i64 @i64(<vscale x 1 x i64> %v, i1 %c) {
; RV32-LABEL: i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    sub sp, sp, a1
; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
; RV32-NEXT:    addi a1, sp, 16
; RV32-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT:    andi a0, a0, 1
; RV32-NEXT:    #APP
; RV32-NEXT:    #NO_APP
; RV32-NEXT:    beqz a0, .LBB0_2
; RV32-NEXT:  # %bb.1: # %truebb
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vl1r.v v9, (a1) # Unknown-size Folded Reload
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vsrl.vx v8, v9, a0
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    j .LBB0_3
; RV32-NEXT:  .LBB0_2: # %falsebb
; RV32-NEXT:    li a1, 0
; RV32-NEXT:  .LBB0_3: # %falsebb
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    add sp, sp, a2
; RV32-NEXT:    .cfi_def_cfa sp, 16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: i64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    sub sp, sp, a1
; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
; RV64-NEXT:    addi a1, sp, 16
; RV64-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; RV64-NEXT:    andi a0, a0, 1
; RV64-NEXT:    #APP
; RV64-NEXT:    #NO_APP
; RV64-NEXT:    beqz a0, .LBB0_2
; RV64-NEXT:  # %bb.1: # %truebb
; RV64-NEXT:    ld a0, 16(sp) # 8-byte Folded Reload
; RV64-NEXT:  .LBB0_2: # %falsebb
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    add sp, sp, a1
; RV64-NEXT:    .cfi_def_cfa sp, 16
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  br i1 %c, label %truebb, label %falsebb
truebb:
  %x = extractelement <vscale x 1 x i64> %v, i32 0
  ret i64 %x
falsebb:
  ret i64 0
}

define i32 @i32(<vscale x 2 x i32> %v, i1 %c) {
; CHECK-LABEL: i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    beqz a0, .LBB1_2
; CHECK-NEXT:  # %bb.1: # %truebb
; CHECK-NEXT:    lw a0, 16(sp) # 8-byte Folded Reload
; CHECK-NEXT:  .LBB1_2: # %falsebb
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    add sp, sp, a1
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  br i1 %c, label %truebb, label %falsebb
truebb:
  %x = extractelement <vscale x 2 x i32> %v, i32 0
  ret i32 %x
falsebb:
  ret i32 0
}

define i16 @i16(<vscale x 4 x i16> %v, i1 %c) {
; CHECK-LABEL: i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    beqz a0, .LBB2_2
; CHECK-NEXT:  # %bb.1: # %truebb
; CHECK-NEXT:    lh a0, 16(sp) # 8-byte Folded Reload
; CHECK-NEXT:  .LBB2_2: # %falsebb
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    add sp, sp, a1
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  br i1 %c, label %truebb, label %falsebb
truebb:
  %x = extractelement <vscale x 4 x i16> %v, i32 0
  ret i16 %x
falsebb:
  ret i16 0
}

define i8 @i8(<vscale x 8 x i8> %v, i1 %c) {
; CHECK-LABEL: i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    beqz a0, .LBB3_2
; CHECK-NEXT:  # %bb.1: # %truebb
; CHECK-NEXT:    lb a0, 16(sp) # 8-byte Folded Reload
; CHECK-NEXT:  .LBB3_2: # %falsebb
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    add sp, sp, a1
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  br i1 %c, label %truebb, label %falsebb
truebb:
  %x = extractelement <vscale x 8 x i8> %v, i32 0
  ret i8 %x
falsebb:
  ret i8 0
}

define double @f64(<vscale x 1 x double> %v, i1 %c) {
; RV32-LABEL: f64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    sub sp, sp, a1
; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
; RV32-NEXT:    addi a1, sp, 16
; RV32-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT:    andi a0, a0, 1
; RV32-NEXT:    #APP
; RV32-NEXT:    #NO_APP
; RV32-NEXT:    beqz a0, .LBB4_2
; RV32-NEXT:  # %bb.1: # %truebb
; RV32-NEXT:    fld fa0, 16(sp) # 8-byte Folded Reload
; RV32-NEXT:    j .LBB4_3
; RV32-NEXT:  .LBB4_2: # %falsebb
; RV32-NEXT:    fcvt.d.w fa0, zero
; RV32-NEXT:  .LBB4_3: # %falsebb
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    add sp, sp, a0
; RV32-NEXT:    .cfi_def_cfa sp, 16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: f64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    sub sp, sp, a1
; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
; RV64-NEXT:    addi a1, sp, 16
; RV64-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; RV64-NEXT:    andi a0, a0, 1
; RV64-NEXT:    #APP
; RV64-NEXT:    #NO_APP
; RV64-NEXT:    beqz a0, .LBB4_2
; RV64-NEXT:  # %bb.1: # %truebb
; RV64-NEXT:    fld fa0, 16(sp) # 8-byte Folded Reload
; RV64-NEXT:    j .LBB4_3
; RV64-NEXT:  .LBB4_2: # %falsebb
; RV64-NEXT:    fmv.d.x fa0, zero
; RV64-NEXT:  .LBB4_3: # %falsebb
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    add sp, sp, a0
; RV64-NEXT:    .cfi_def_cfa sp, 16
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  br i1 %c, label %truebb, label %falsebb
truebb:
  %x = extractelement <vscale x 1 x double> %v, i32 0
  ret double %x
falsebb:
  ret double 0.0
}

define float @f32(<vscale x 2 x float> %v, i1 %c) {
; CHECK-LABEL: f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    beqz a0, .LBB5_2
; CHECK-NEXT:  # %bb.1: # %truebb
; CHECK-NEXT:    flw fa0, 16(sp) # 8-byte Folded Reload
; CHECK-NEXT:    j .LBB5_3
; CHECK-NEXT:  .LBB5_2: # %falsebb
; CHECK-NEXT:    fmv.w.x fa0, zero
; CHECK-NEXT:  .LBB5_3: # %falsebb
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  br i1 %c, label %truebb, label %falsebb
truebb:
  %x = extractelement <vscale x 2 x float> %v, i32 0
  ret float %x
falsebb:
  ret float 0.0
}

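; For f16 and bf16, the ZFMIN configurations have 16-bit FP moves/loads, so
; the element can be reloaded directly into fa0. The NOZFMIN configurations
; instead reload the raw bits with lh and NaN-box them by ORing in 0xffff0000
; (lui 1048560) before transferring to fa0 with fmv.w.x.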
define half @f16(<vscale x 1 x half> %v, i1 %c) {
; ZFMIN-LABEL: f16:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    addi sp, sp, -16
; ZFMIN-NEXT:    .cfi_def_cfa_offset 16
; ZFMIN-NEXT:    csrr a1, vlenb
; ZFMIN-NEXT:    sub sp, sp, a1
; ZFMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
; ZFMIN-NEXT:    addi a1, sp, 16
; ZFMIN-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; ZFMIN-NEXT:    andi a0, a0, 1
; ZFMIN-NEXT:    #APP
; ZFMIN-NEXT:    #NO_APP
; ZFMIN-NEXT:    beqz a0, .LBB6_2
; ZFMIN-NEXT:  # %bb.1: # %truebb
; ZFMIN-NEXT:    flh fa0, 16(sp) # 8-byte Folded Reload
; ZFMIN-NEXT:    j .LBB6_3
; ZFMIN-NEXT:  .LBB6_2: # %falsebb
; ZFMIN-NEXT:    fmv.h.x fa0, zero
; ZFMIN-NEXT:  .LBB6_3: # %falsebb
; ZFMIN-NEXT:    csrr a0, vlenb
; ZFMIN-NEXT:    add sp, sp, a0
; ZFMIN-NEXT:    .cfi_def_cfa sp, 16
; ZFMIN-NEXT:    addi sp, sp, 16
; ZFMIN-NEXT:    .cfi_def_cfa_offset 0
; ZFMIN-NEXT:    ret
;
; NOZFMIN-LABEL: f16:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    addi sp, sp, -16
; NOZFMIN-NEXT:    .cfi_def_cfa_offset 16
; NOZFMIN-NEXT:    csrr a1, vlenb
; NOZFMIN-NEXT:    sub sp, sp, a1
; NOZFMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
; NOZFMIN-NEXT:    addi a1, sp, 16
; NOZFMIN-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; NOZFMIN-NEXT:    andi a0, a0, 1
; NOZFMIN-NEXT:    #APP
; NOZFMIN-NEXT:    #NO_APP
; NOZFMIN-NEXT:    beqz a0, .LBB6_2
; NOZFMIN-NEXT:  # %bb.1: # %truebb
; NOZFMIN-NEXT:    lh a0, 16(sp) # 8-byte Folded Reload
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    j .LBB6_3
; NOZFMIN-NEXT:  .LBB6_2: # %falsebb
; NOZFMIN-NEXT:    lui a0, 1048560
; NOZFMIN-NEXT:  .LBB6_3: # %falsebb
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    csrr a0, vlenb
; NOZFMIN-NEXT:    add sp, sp, a0
; NOZFMIN-NEXT:    .cfi_def_cfa sp, 16
; NOZFMIN-NEXT:    addi sp, sp, 16
; NOZFMIN-NEXT:    .cfi_def_cfa_offset 0
; NOZFMIN-NEXT:    ret
  tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  br i1 %c, label %truebb, label %falsebb
truebb:
  %x = extractelement <vscale x 1 x half> %v, i32 0
  ret half %x
falsebb:
  ret half 0.0
}

define bfloat @bf16(<vscale x 2 x bfloat> %v, i1 %c) {
; ZFMIN-LABEL: bf16:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    addi sp, sp, -16
; ZFMIN-NEXT:    .cfi_def_cfa_offset 16
; ZFMIN-NEXT:    csrr a1, vlenb
; ZFMIN-NEXT:    sub sp, sp, a1
; ZFMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
; ZFMIN-NEXT:    addi a1, sp, 16
; ZFMIN-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; ZFMIN-NEXT:    andi a0, a0, 1
; ZFMIN-NEXT:    #APP
; ZFMIN-NEXT:    #NO_APP
; ZFMIN-NEXT:    beqz a0, .LBB7_2
; ZFMIN-NEXT:  # %bb.1: # %truebb
; ZFMIN-NEXT:    lh a0, 16(sp) # 8-byte Folded Reload
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    j .LBB7_3
; ZFMIN-NEXT:  .LBB7_2: # %falsebb
; ZFMIN-NEXT:    fmv.h.x fa0, zero
; ZFMIN-NEXT:  .LBB7_3: # %falsebb
; ZFMIN-NEXT:    csrr a0, vlenb
; ZFMIN-NEXT:    add sp, sp, a0
; ZFMIN-NEXT:    .cfi_def_cfa sp, 16
; ZFMIN-NEXT:    addi sp, sp, 16
; ZFMIN-NEXT:    .cfi_def_cfa_offset 0
; ZFMIN-NEXT:    ret
;
; NOZFMIN-LABEL: bf16:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    addi sp, sp, -16
; NOZFMIN-NEXT:    .cfi_def_cfa_offset 16
; NOZFMIN-NEXT:    csrr a1, vlenb
; NOZFMIN-NEXT:    sub sp, sp, a1
; NOZFMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
; NOZFMIN-NEXT:    addi a1, sp, 16
; NOZFMIN-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
; NOZFMIN-NEXT:    andi a0, a0, 1
; NOZFMIN-NEXT:    #APP
; NOZFMIN-NEXT:    #NO_APP
; NOZFMIN-NEXT:    beqz a0, .LBB7_2
; NOZFMIN-NEXT:  # %bb.1: # %truebb
; NOZFMIN-NEXT:    lh a0, 16(sp) # 8-byte Folded Reload
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    j .LBB7_3
; NOZFMIN-NEXT:  .LBB7_2: # %falsebb
; NOZFMIN-NEXT:    lui a0, 1048560
; NOZFMIN-NEXT:  .LBB7_3: # %falsebb
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    csrr a0, vlenb
; NOZFMIN-NEXT:    add sp, sp, a0
; NOZFMIN-NEXT:    .cfi_def_cfa sp, 16
; NOZFMIN-NEXT:    addi sp, sp, 16
; NOZFMIN-NEXT:    .cfi_def_cfa_offset 0
; NOZFMIN-NEXT:    ret
  tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  br i1 %c, label %truebb, label %falsebb
truebb:
  %x = extractelement <vscale x 2 x bfloat> %v, i32 0
  ret bfloat %x
falsebb:
  ret bfloat 0.0
}