; xref: /llvm-project/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll (revision fc2b4dfef2f1c4d04d0d5d25e14ffaa0622d5189)
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s

; Declarations of the vscale intrinsics exercised by the tests below.
declare i32 @llvm.vscale.i32()
declare i64 @llvm.vscale.i64()
; Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)).
; Here C0 = C1 = 1, so the result is 2 * vscale, i.e. CNTD (count of
; 64-bit doubleword elements in a scalable vector = 2 * vscale).
define i64 @combine_add_vscale_i64() nounwind {
; CHECK-LABEL: combine_add_vscale_i64:
; CHECK-NOT:   add
; CHECK-NEXT:  cntd  x0
; CHECK-NEXT:  ret
  %vscale = call i64 @llvm.vscale.i64()
  %add = add i64 %vscale, %vscale
  ret i64 %add
}

; Same fold as above, but with the i32 variant of the intrinsic.
define i32 @combine_add_vscale_i32() nounwind {
; CHECK-LABEL: combine_add_vscale_i32:
; CHECK-NOT:   add
; CHECK-NEXT:  cntd  x0
; CHECK-NEXT:  ret
  %vscale = call i32 @llvm.vscale.i32()
  %add = add i32 %vscale, %vscale
  ret i32 %add
}

; Fold (mul (vscale * C0), C1) to (vscale * (C0 * C1)).
; In this test, C0 = 1, C1 = 32. RDVL reads 16 * vscale, so
; 32 * vscale is materialized as RDVL with immediate #2.
define i64 @combine_mul_vscale_i64() nounwind {
; CHECK-LABEL: combine_mul_vscale_i64:
; CHECK-NOT:   mul
; CHECK-NEXT:  rdvl  x0, #2
; CHECK-NEXT:  ret
  %vscale = call i64 @llvm.vscale.i64()
  %mul = mul i64 %vscale, 32
  ret i64 %mul
}

; Same mul fold with the i32 intrinsic: 48 * vscale = RDVL #3 (3 * 16).
define i32 @combine_mul_vscale_i32() nounwind {
; CHECK-LABEL: combine_mul_vscale_i32:
; CHECK-NOT:   mul
; CHECK-NEXT:  rdvl  x0, #3
; CHECK-NEXT:  ret
  %vscale = call i32 @llvm.vscale.i32()
  %mul = mul i32 %vscale, 48
  ret i32 %mul
}

; Canonicalize (sub X, (vscale * C)) to (add X, (vscale * -C)).
; -vscale is materialized as RDVL #-1 (= -16 * vscale) shifted right by 4.
define i64 @combine_sub_vscale_i64(i64 %in) nounwind {
; CHECK-LABEL: combine_sub_vscale_i64:
; CHECK-NOT:   sub
; CHECK-NEXT:  rdvl  x8, #-1
; CHECK-NEXT:  asr   x8, x8, #4
; CHECK-NEXT:  add   x0, x0, x8
; CHECK-NEXT:  ret
  %vscale = call i64 @llvm.vscale.i64()
  %sub = sub i64 %in, %vscale
  ret i64 %sub
}

; Same sub-to-add canonicalization with the i32 intrinsic.
define i32 @combine_sub_vscale_i32(i32 %in) nounwind {
; CHECK-LABEL: combine_sub_vscale_i32:
; CHECK-NOT:   sub
; CHECK-NEXT:  rdvl  x8, #-1
; CHECK-NEXT:  asr   x8, x8, #4
; CHECK-NEXT:  add   w0, w0, w8
; CHECK-NEXT:  ret
  %vscale = call i32 @llvm.vscale.i32()
  %sub = sub i32 %in, %vscale
  ret i32 %sub
}

; Tests of multiple uses of vscale when canonicalizing
; (sub X, (vscale * C)) to (add X, (vscale * -C)).
; With two users, vscale is computed once (RDVL #1 >> 4) and shared.
define i64 @multiple_uses_sub_vscale_i64(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: multiple_uses_sub_vscale_i64:
; CHECK-NEXT:  rdvl  x8, #1
; CHECK-NEXT:  lsr   x8, x8, #4
; CHECK-NEXT:  sub   x9, x0, x8
; CHECK-NEXT:  add   x8, x1, x8
; CHECK-NEXT:  mul   x0, x9, x8
; CHECK-NEXT:  ret
  %vscale = call i64 @llvm.vscale.i64()
  %sub = sub i64 %x, %vscale
  %add = add i64 %y, %vscale
  %res = mul i64 %sub, %add
  ret i64 %res
}

; Fold (shl (vscale * C0), C1) to (vscale * (C0 << C1)).
; C0 = 1, C1 = 4.
; At IR level, %shl = 2^4 * VSCALE.
; At assembly level, the output of RDVL is also 2^4 * VSCALE.
; Hence, the immediate for RDVL is #1.
define i64 @combine_shl_vscale_i64() nounwind {
; CHECK-LABEL: combine_shl_vscale_i64:
; CHECK-NOT:   shl
; CHECK-NEXT:  rdvl  x0, #1
; CHECK-NEXT:  ret
  %vscale = call i64 @llvm.vscale.i64()
  %shl = shl i64 %vscale, 4
  ret i64 %shl
}

; Same shl fold with the i32 intrinsic.
define i32 @combine_shl_vscale_i32() nounwind {
; CHECK-LABEL: combine_shl_vscale_i32:
; CHECK-NOT:   shl
; CHECK-NEXT:  rdvl  x0, #1
; CHECK-NEXT:  ret
  %vscale = call i32 @llvm.vscale.i32()
  %shl = shl i32 %vscale, 4
  ret i32 %shl
}
115