; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s

declare <16 x i8> @llvm.loongarch.lsx.vssrani.b.h(<16 x i8>, <16 x i8>, i32)

define <16 x i8> @lsx_vssrani_b_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
; CHECK-LABEL: lsx_vssrani_b_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vssrani.b.h $vr0, $vr1, 1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vssrani.b.h(<16 x i8> %va, <16 x i8> %vb, i32 1)
  ret <16 x i8> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vssrani.h.w(<8 x i16>, <8 x i16>, i32)

define <8 x i16> @lsx_vssrani_h_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
; CHECK-LABEL: lsx_vssrani_h_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vssrani.h.w $vr0, $vr1, 1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vssrani.h.w(<8 x i16> %va, <8 x i16> %vb, i32 1)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vssrani.w.d(<4 x i32>, <4 x i32>, i32)

define <4 x i32> @lsx_vssrani_w_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
; CHECK-LABEL: lsx_vssrani_w_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vssrani.w.d $vr0, $vr1, 1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vssrani.w.d(<4 x i32> %va, <4 x i32> %vb, i32 1)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vssrani.d.q(<2 x i64>, <2 x i64>, i32)

define <2 x i64> @lsx_vssrani_d_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
; CHECK-LABEL: lsx_vssrani_d_q:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vssrani.d.q $vr0, $vr1, 1
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vssrani.d.q(<2 x i64> %va, <2 x i64> %vb, i32 1)
  ret <2 x i64> %res
}

declare <16 x i8> @llvm.loongarch.lsx.vssrani.bu.h(<16 x i8>, <16 x i8>, i32)

define <16 x i8> @lsx_vssrani_bu_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
; CHECK-LABEL: lsx_vssrani_bu_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vssrani.bu.h $vr0, $vr1, 15
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vssrani.bu.h(<16 x i8> %va, <16 x i8> %vb, i32 15)
  ret <16 x i8> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vssrani.hu.w(<8 x i16>, <8 x i16>, i32)

define <8 x i16> @lsx_vssrani_hu_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
; CHECK-LABEL: lsx_vssrani_hu_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vssrani.hu.w $vr0, $vr1, 31
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vssrani.hu.w(<8 x i16> %va, <8 x i16> %vb, i32 31)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vssrani.wu.d(<4 x i32>, <4 x i32>, i32)

define <4 x i32> @lsx_vssrani_wu_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
; CHECK-LABEL: lsx_vssrani_wu_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vssrani.wu.d $vr0, $vr1, 63
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vssrani.wu.d(<4 x i32> %va, <4 x i32> %vb, i32 63)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vssrani.du.q(<2 x i64>, <2 x i64>, i32)

define <2 x i64> @lsx_vssrani_du_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
; CHECK-LABEL: lsx_vssrani_du_q:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vssrani.du.q $vr0, $vr1, 127
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vssrani.du.q(<2 x i64> %va, <2 x i64> %vb, i32 127)
  ret <2 x i64> %res
}