; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+v < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs -mattr=+v < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I

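; The AND with (%y >> 20) keeps only the low 12 bits of the sum, and
; 4095 == -1 (mod 2^12), so the add immediate can be shrunk to -1
; (addi/addiw below) instead of materializing 4095 with lui+addi.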
define i32 @and_add_lsr(i32 %x, i32 %y) {
; RV32I-LABEL: and_add_lsr:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a0, a0, -1
; RV32I-NEXT:    srli a1, a1, 20
; RV32I-NEXT:    and a0, a1, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: and_add_lsr:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addiw a0, a0, -1
; RV64I-NEXT:    srliw a1, a1, 20
; RV64I-NEXT:    and a0, a1, a0
; RV64I-NEXT:    ret
  %1 = add i32 %x, 4095
  %2 = lshr i32 %y, 20
  %r = and i32 %2, %1
  ret i32 %r
}

; Make sure we don't crash on fixed-length vectors
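; In the vector cases below the immediate is materialized with lui+addi
; (4096 - 1) and added as-is, rather than shrunk to -1 as in the scalar case.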
define <2 x i32> @and_add_lsr_vec(<2 x i32> %x, <2 x i32> %y) {
; RV32I-LABEL: and_add_lsr_vec:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a0, 1
; RV32I-NEXT:    addi a0, a0, -1
; RV32I-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32I-NEXT:    vadd.vx v8, v8, a0
; RV32I-NEXT:    vsrl.vi v9, v9, 20
; RV32I-NEXT:    vand.vv v8, v9, v8
; RV32I-NEXT:    ret
;
; RV64I-LABEL: and_add_lsr_vec:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a0, 1
; RV64I-NEXT:    addi a0, a0, -1
; RV64I-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64I-NEXT:    vadd.vx v8, v8, a0
; RV64I-NEXT:    vsrl.vi v9, v9, 20
; RV64I-NEXT:    vand.vv v8, v9, v8
; RV64I-NEXT:    ret
  %1 = add <2 x i32> %x, splat (i32 4095)
  %2 = lshr <2 x i32> %y, splat (i32 20)
  %r = and <2 x i32> %2, %1
  ret <2 x i32> %r
}

; Make sure we don't crash on scalable vectors
define <vscale x 2 x i32> @and_add_lsr_vec2(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
; RV32I-LABEL: and_add_lsr_vec2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a0, 1
; RV32I-NEXT:    addi a0, a0, -1
; RV32I-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV32I-NEXT:    vadd.vx v8, v8, a0
; RV32I-NEXT:    vsrl.vi v9, v9, 20
; RV32I-NEXT:    vand.vv v8, v9, v8
; RV32I-NEXT:    ret
;
; RV64I-LABEL: and_add_lsr_vec2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a0, 1
; RV64I-NEXT:    addi a0, a0, -1
; RV64I-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV64I-NEXT:    vadd.vx v8, v8, a0
; RV64I-NEXT:    vsrl.vi v9, v9, 20
; RV64I-NEXT:    vand.vv v8, v9, v8
; RV64I-NEXT:    ret
  %1 = add <vscale x 2 x i32> %x, splat (i32 4095)
  %2 = lshr <vscale x 2 x i32> %y, splat (i32 20)
  %r = and <vscale x 2 x i32> %2, %1
  ret <vscale x 2 x i32> %r
}