; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+v < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs -mattr=+v < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I
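; The scalar test below shows the add immediate being shrunk:
;   (and (lshr y, 20), (add x, 4095)) -> (and (lshr y, 20), (add x, -1))
; The shift leaves at most 32 - 20 = 12 significant bits, so the and observes
; only the low 12 bits of the add, and 4095 is -1 modulo 2^12. Folding the
; immediate to -1 lets llc emit a single addi instead of materializing 4095
; with a lui+addi pair. (RV64 uses addiw/srliw to keep the arithmetic in
; 32 bits.)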
define i32 @and_add_lsr(i32 %x, i32 %y) {
; RV32I-LABEL: and_add_lsr:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a0, a0, -1
; RV32I-NEXT:    srli a1, a1, 20
; RV32I-NEXT:    and a0, a1, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: and_add_lsr:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addiw a0, a0, -1
; RV64I-NEXT:    srliw a1, a1, 20
; RV64I-NEXT:    and a0, a1, a0
; RV64I-NEXT:    ret
  %1 = add i32 %x, 4095
  %2 = lshr i32 %y, 20
  %r = and i32 %2, %1
  ret i32 %r
}
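; Neither vector test expects the fold: the 4095 splat is still materialized
; with lui+addi (lui a0, 1 puts 0x1000 in a0; addi a0, a0, -1 yields 0xfff).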
; Make sure we don't crash on fixed-length vectors
define <2 x i32> @and_add_lsr_vec(<2 x i32> %x, <2 x i32> %y) {
; RV32I-LABEL: and_add_lsr_vec:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a0, 1
; RV32I-NEXT:    addi a0, a0, -1
; RV32I-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32I-NEXT:    vadd.vx v8, v8, a0
; RV32I-NEXT:    vsrl.vi v9, v9, 20
; RV32I-NEXT:    vand.vv v8, v9, v8
; RV32I-NEXT:    ret
;
; RV64I-LABEL: and_add_lsr_vec:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a0, 1
; RV64I-NEXT:    addi a0, a0, -1
; RV64I-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64I-NEXT:    vadd.vx v8, v8, a0
; RV64I-NEXT:    vsrl.vi v9, v9, 20
; RV64I-NEXT:    vand.vv v8, v9, v8
; RV64I-NEXT:    ret
  %1 = add <2 x i32> %x, splat (i32 4095)
  %2 = lshr <2 x i32> %y, splat (i32 20)
  %r = and <2 x i32> %2, %1
  ret <2 x i32> %r
}
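; Unlike the fixed-length test, which requests an immediate VL of 2 via
; vsetivli, the scalable test below sets vl to VLMAX (vsetvli with x0 as the
; AVL and a non-x0 destination).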
; Make sure we don't crash on scalable vectors
define <vscale x 2 x i32> @and_add_lsr_vec2(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
; RV32I-LABEL: and_add_lsr_vec2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a0, 1
; RV32I-NEXT:    addi a0, a0, -1
; RV32I-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV32I-NEXT:    vadd.vx v8, v8, a0
; RV32I-NEXT:    vsrl.vi v9, v9, 20
; RV32I-NEXT:    vand.vv v8, v9, v8
; RV32I-NEXT:    ret
;
; RV64I-LABEL: and_add_lsr_vec2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a0, 1
; RV64I-NEXT:    addi a0, a0, -1
; RV64I-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV64I-NEXT:    vadd.vx v8, v8, a0
; RV64I-NEXT:    vsrl.vi v9, v9, 20
; RV64I-NEXT:    vand.vv v8, v9, v8
; RV64I-NEXT:    ret
  %1 = add <vscale x 2 x i32> %x, splat (i32 4095)
  %2 = lshr <vscale x 2 x i32> %y, splat (i32 20)
  %r = and <vscale x 2 x i32> %2, %1
  ret <vscale x 2 x i32> %r
}