; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

declare <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i16> @llvm.vp.trunc.nxv1i16.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
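
; The (vp.trunc (vp.lshr X, (vp.sext Y))) chains below should combine into a
; single vnsrl.wv, with the mask and EVL taken from the final vp.trunc.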
define <vscale x 1 x i16> @vsrl_vv_nxv1i16(<vscale x 1 x i32> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %vr = call <vscale x 1 x i16> @llvm.vp.trunc.nxv1i16.nxv1i32(<vscale x 1 x i32> %v, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %vr
}
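
; Same combine with an all-ones mask on the truncate: the vnsrl.wv is
; emitted unmasked.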
define <vscale x 1 x i16> @vsrl_vv_nxv1i16_unmasked(<vscale x 1 x i32> %a, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %vr = call <vscale x 1 x i16> @llvm.vp.trunc.nxv1i16.nxv1i32(<vscale x 1 x i32> %v, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  ret <vscale x 1 x i16> %vr
}

declare <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i32> @llvm.vp.trunc.nxv1i32.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
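
; Same combine one element size up: narrowing nxv1i64 sources to nxv1i32.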
define <vscale x 1 x i32> @vsrl_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %bext = call <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %vr = call <vscale x 1 x i32> @llvm.vp.trunc.nxv1i32.nxv1i64(<vscale x 1 x i64> %v, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %vr
}
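
; Unmasked nxv1i64 -> nxv1i32 variant.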
define <vscale x 1 x i32> @vsrl_vv_nxv1i64_unmasked(<vscale x 1 x i64> %a, <vscale x 1 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
  %bext = call <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %vr = call <vscale x 1 x i32> @llvm.vp.trunc.nxv1i32.nxv1i64(<vscale x 1 x i64> %v, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  ret <vscale x 1 x i32> %vr
}