; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 -mattr=+d --verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 -mattr=+d --verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=LA64

declare i16 @llvm.bitreverse.i16(i16)
declare i32 @llvm.bitreverse.i32(i32)
declare i64 @llvm.bitreverse.i64(i64)
declare i16 @llvm.bswap.i16(i16)
declare i32 @llvm.bswap.i32(i32)
declare i64 @llvm.bswap.i64(i64)

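; Composing bswap and bitreverse (in either order) reverses the bits within
; each byte, so the i32 and i64 tests below fold to bitrev.4b/bitrev.8b,
; while the i16 tests lower via revb.2h plus a full-width bitrev and a shift.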
define i16 @test_bswap_bitreverse_i16(i16 %a) nounwind {
; LA32-LABEL: test_bswap_bitreverse_i16:
; LA32:       # %bb.0:
; LA32-NEXT:    revb.2h $a0, $a0
; LA32-NEXT:    bitrev.w $a0, $a0
; LA32-NEXT:    srli.w $a0, $a0, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: test_bswap_bitreverse_i16:
; LA64:       # %bb.0:
; LA64-NEXT:    revb.2h $a0, $a0
; LA64-NEXT:    bitrev.d $a0, $a0
; LA64-NEXT:    srli.d $a0, $a0, 48
; LA64-NEXT:    ret
  %tmp = call i16 @llvm.bswap.i16(i16 %a)
  %tmp2 = call i16 @llvm.bitreverse.i16(i16 %tmp)
  ret i16 %tmp2
}

define i32 @test_bswap_bitreverse_i32(i32 %a) nounwind {
; LA32-LABEL: test_bswap_bitreverse_i32:
; LA32:       # %bb.0:
; LA32-NEXT:    bitrev.4b $a0, $a0
; LA32-NEXT:    ret
;
; LA64-LABEL: test_bswap_bitreverse_i32:
; LA64:       # %bb.0:
; LA64-NEXT:    bitrev.4b $a0, $a0
; LA64-NEXT:    ret
  %tmp = call i32 @llvm.bswap.i32(i32 %a)
  %tmp2 = call i32 @llvm.bitreverse.i32(i32 %tmp)
  ret i32 %tmp2
}

define i64 @test_bswap_bitreverse_i64(i64 %a) nounwind {
; LA32-LABEL: test_bswap_bitreverse_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    bitrev.4b $a0, $a0
; LA32-NEXT:    bitrev.4b $a1, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: test_bswap_bitreverse_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    bitrev.8b $a0, $a0
; LA64-NEXT:    ret
  %tmp = call i64 @llvm.bswap.i64(i64 %a)
  %tmp2 = call i64 @llvm.bitreverse.i64(i64 %tmp)
  ret i64 %tmp2
}

define i16 @test_bitreverse_bswap_i16(i16 %a) nounwind {
; LA32-LABEL: test_bitreverse_bswap_i16:
; LA32:       # %bb.0:
; LA32-NEXT:    revb.2h $a0, $a0
; LA32-NEXT:    bitrev.w $a0, $a0
; LA32-NEXT:    srli.w $a0, $a0, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: test_bitreverse_bswap_i16:
; LA64:       # %bb.0:
; LA64-NEXT:    revb.2h $a0, $a0
; LA64-NEXT:    bitrev.d $a0, $a0
; LA64-NEXT:    srli.d $a0, $a0, 48
; LA64-NEXT:    ret
  %tmp = call i16 @llvm.bitreverse.i16(i16 %a)
  %tmp2 = call i16 @llvm.bswap.i16(i16 %tmp)
  ret i16 %tmp2
}

define i32 @test_bitreverse_bswap_i32(i32 %a) nounwind {
; LA32-LABEL: test_bitreverse_bswap_i32:
; LA32:       # %bb.0:
; LA32-NEXT:    bitrev.4b $a0, $a0
; LA32-NEXT:    ret
;
; LA64-LABEL: test_bitreverse_bswap_i32:
; LA64:       # %bb.0:
; LA64-NEXT:    bitrev.4b $a0, $a0
; LA64-NEXT:    ret
  %tmp = call i32 @llvm.bitreverse.i32(i32 %a)
  %tmp2 = call i32 @llvm.bswap.i32(i32 %tmp)
  ret i32 %tmp2
}

define i64 @test_bitreverse_bswap_i64(i64 %a) nounwind {
; LA32-LABEL: test_bitreverse_bswap_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    bitrev.4b $a0, $a0
; LA32-NEXT:    bitrev.4b $a1, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: test_bitreverse_bswap_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    bitrev.8b $a0, $a0
; LA64-NEXT:    ret
  %tmp = call i64 @llvm.bitreverse.i64(i64 %a)
  %tmp2 = call i64 @llvm.bswap.i64(i64 %tmp)
  ret i64 %tmp2
}

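; The shifts, or, trunc and sext below amount to byte-swapping the low 16 bits
; of %0 and sign-extending the result to i32.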
define i32 @pr55484(i32 %0) {
; LA32-LABEL: pr55484:
; LA32:       # %bb.0:
; LA32-NEXT:    srli.w $a1, $a0, 8
; LA32-NEXT:    slli.w $a0, $a0, 8
; LA32-NEXT:    or $a0, $a1, $a0
; LA32-NEXT:    ext.w.h $a0, $a0
; LA32-NEXT:    ret
;
; LA64-LABEL: pr55484:
; LA64:       # %bb.0:
; LA64-NEXT:    srli.d $a1, $a0, 8
; LA64-NEXT:    slli.d $a0, $a0, 8
; LA64-NEXT:    or $a0, $a1, $a0
; LA64-NEXT:    ext.w.h $a0, $a0
; LA64-NEXT:    ret
  %2 = lshr i32 %0, 8
  %3 = shl i32 %0, 8
  %4 = or i32 %2, %3
  %5 = trunc i32 %4 to i16
  %6 = sext i16 %5 to i32
  ret i32 %6
}