; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s

; fold (and (or x, C), D) -> D if (C & D) == D
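; Here C = 255 and D = 8; since 255 & 8 == 8, (%A | 255) & 8 constant-folds to splat (i32 8).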

define <vscale x 4 x i32> @and_or_nxv4i32(<vscale x 4 x i32> %A) {
; CHECK-LABEL: and_or_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 8
; CHECK-NEXT:    ret
  %v1 = or <vscale x 4 x i32> %A, splat (i32 255)
  %v2 = and <vscale x 4 x i32> %v1, splat (i32 8)
  ret <vscale x 4 x i32> %v2
}

; (or (and X, c1), c2) -> (and (or X, c2), c1|c2) iff (c1 & c2) != 0
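; Here c1 = 7 and c2 = 3; 7 & 3 == 3 != 0, so (%a0 & 7) | 3 is rewritten as (%a0 | 3) & 7.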

define <vscale x 2 x i64> @or_and_nxv2i64(<vscale x 2 x i64> %a0) {
; CHECK-LABEL: or_and_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vor.vi v8, v8, 3
; CHECK-NEXT:    vand.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v1 = and <vscale x 2 x i64> %a0, splat (i64 7)
  %v2 = or <vscale x 2 x i64> %v1, splat (i64 3)
  ret <vscale x 2 x i64> %v2
}

; If all masked bits are going to be set, that's a constant fold.
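; Here (%a0 & 1) | 3: the or sets bits 0 and 1 and the and clears every higher bit, so the result is always splat (i64 3).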

define <vscale x 2 x i64> @or_and_nxv2i64_fold(<vscale x 2 x i64> %a0) {
; CHECK-LABEL: or_and_nxv2i64_fold:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    ret
  %v1 = and <vscale x 2 x i64> %a0, splat (i64 1)
  %v2 = or <vscale x 2 x i64> %v1, splat (i64 3)
  ret <vscale x 2 x i64> %v2
}

; fold (shl (shl x, c1), c2) -> (shl x, (add c1, c2))
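; Here c1 = 2 and c2 = 4, so the two shifts combine into a single shift left by 6.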

define <vscale x 4 x i32> @combine_vec_shl_shl(<vscale x 4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_shl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 6
; CHECK-NEXT:    ret
  %v1 = shl <vscale x 4 x i32> %x, splat (i32 2)
  %v2 = shl <vscale x 4 x i32> %v1, splat (i32 4)
  ret <vscale x 4 x i32> %v2
}

; fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
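; Here c1 = 2 and c2 = 4, so the two arithmetic shifts combine into a single shift right by 6.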

define <vscale x 2 x i32> @combine_vec_ashr_ashr(<vscale x 2 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_ashr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %v1 = ashr <vscale x 2 x i32> %x, splat (i32 2)
  %v2 = ashr <vscale x 2 x i32> %v1, splat (i32 4)
  ret <vscale x 2 x i32> %v2
}

; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))
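; Here c1 = 4 and c2 = 4, so the two logical shifts combine into a single shift right by 8.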

define <vscale x 8 x i16> @combine_vec_lshr_lshr(<vscale x 8 x i16> %x) {
; CHECK-LABEL: combine_vec_lshr_lshr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
  %v1 = lshr <vscale x 8 x i16> %x, splat (i16 4)
  %v2 = lshr <vscale x 8 x i16> %v1, splat (i16 4)
  ret <vscale x 8 x i16> %v2
}

; fold (fmul x, 1.0) -> x
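; Multiplying by 1.0 is an identity, so the multiply is dropped and the function is just a return.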
define <vscale x 2 x float> @combine_fmul_one(<vscale x 2 x float> %x) {
; CHECK-LABEL: combine_fmul_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = fmul <vscale x 2 x float> %x, splat (float 1.0)
  ret <vscale x 2 x float> %v
}

; fold (fmul 1.0, x) -> x
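; Same fold with the operands commuted; fmul is commutative, so the multiply is dropped as well.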
define <vscale x 2 x float> @combine_fmul_one_commuted(<vscale x 2 x float> %x) {
; CHECK-LABEL: combine_fmul_one_commuted:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = fmul <vscale x 2 x float> splat (float 1.0), %x
  ret <vscale x 2 x float> %v
}