; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s

declare <vscale x 2 x i7> @llvm.vp.trunc.nxv2i7.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i7> @vtrunc_nxv2i7_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i7_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i7> @llvm.vp.trunc.nxv2i7.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i7> %v
}

declare <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i15(<vscale x 2 x i15>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i15(<vscale x 2 x i15> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i15(<vscale x 2 x i15> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i16_unmasked(<vscale x 2 x i16> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i32_unmasked(<vscale x 2 x i32> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
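; vnsrl.wi only narrows by one SEW step (2*SEW -> SEW), so the i64 -> i8
; truncation below is expected to lower to a chain of three vnsrl.wi steps
; (i64 -> i32 -> i16 -> i8), each under its own vsetvli.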
define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i64_unmasked(<vscale x 2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i32_unmasked(<vscale x 2 x i32> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i64_unmasked(<vscale x 2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<vscale x 15 x i64>, <vscale x 15 x i1>, i32)
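; nxv15i64 has a non-power-of-2 element count, so the checks below expect the
; operation to be split in two: the mask for the high half is extracted with
; vslidedown.vx, and its VL is clamped to the remainder via the
; sltu/addi/and sequence before each half is narrowed separately.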
define <vscale x 15 x i16> @vtrunc_nxv15i16_nxv15i64(<vscale x 15 x i64> %a, <vscale x 15 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 3
; CHECK-NEXT:    sub a3, a0, a1
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sltu a2, a0, a3
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    and a2, a2, a3
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v28, v16, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v18, v28, 0, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB12_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB12_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v20, 0, v0.t
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<vscale x 15 x i64> %a, <vscale x 15 x i1> %m, i32 %vl)
  ret <vscale x 15 x i16> %v
}

declare <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vtrunc_nxv2i32_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vtrunc_nxv2i32_nxv2i64_unmasked(<vscale x 2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x i32> %v
}

declare <vscale x 32 x i7> @llvm.vp.trunc.nxv32i7.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i7> @vtrunc_nxv32i7_nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i7_nxv32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 2
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v28, v16, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v18, v28, 0, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB15_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB15_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v20, 0, v0.t
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i7> @llvm.vp.trunc.nxv32i7.nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 %vl)
  ret <vscale x 32 x i7> %v
}

declare <vscale x 32 x i8> @llvm.vp.trunc.nxv32i8.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vtrunc_nxv32i8_nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i8_nxv32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 2
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v28, v16, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v18, v28, 0, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB16_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB16_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v20, 0, v0.t
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.trunc.nxv32i8.nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 %vl)
  ret <vscale x 32 x i8> %v
}

declare <vscale x 32 x i32> @llvm.vp.trunc.nxv32i32.nxv32i64(<vscale x 32 x i64>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i64_nxv32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v7, v0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a3, a1, 3
; CHECK-NEXT:    srli a5, a1, 2
; CHECK-NEXT:    slli a6, a1, 3
; CHECK-NEXT:    slli a4, a1, 1
; CHECK-NEXT:    vslidedown.vx v16, v0, a5
; CHECK-NEXT:    add a6, a0, a6
; CHECK-NEXT:    sub a5, a2, a4
; CHECK-NEXT:    vl8re64.v v24, (a6)
; CHECK-NEXT:    sltu a6, a2, a5
; CHECK-NEXT:    addi a6, a6, -1
; CHECK-NEXT:    and a5, a6, a5
; CHECK-NEXT:    sub a6, a5, a1
; CHECK-NEXT:    sltu a7, a5, a6
; CHECK-NEXT:    addi a7, a7, -1
; CHECK-NEXT:    vl8re64.v v8, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v16, a3
; CHECK-NEXT:    and a0, a7, a6
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v20, v24, 0, v0.t
; CHECK-NEXT:    bltu a5, a1, .LBB17_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a5, a1
; CHECK-NEXT:  .LBB17_2:
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v6, v7, a3
; CHECK-NEXT:    vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT:    bltu a2, a4, .LBB17_4
; CHECK-NEXT:  # %bb.3:
; CHECK-NEXT:    mv a2, a4
; CHECK-NEXT:  .LBB17_4:
; CHECK-NEXT:    sub a0, a2, a1
; CHECK-NEXT:    sltu a3, a2, a0
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a0, a3, a0
; CHECK-NEXT:    vmv1r.v v0, v6
; CHECK-NEXT:    addi a3, sp, 16
; CHECK-NEXT:    vl8r.v v8, (a3) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v28, v8, 0, v0.t
; CHECK-NEXT:    bltu a2, a1, .LBB17_6
; CHECK-NEXT:  # %bb.5:
; CHECK-NEXT:    mv a2, a1
; CHECK-NEXT:  .LBB17_6:
; CHECK-NEXT:    vmv1r.v v0, v7
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v24, v8, 0, v0.t
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i32> @llvm.vp.trunc.nxv32i32.nxv32i64(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 %vl)
  ret <vscale x 32 x i32> %v
}