; RUN: llc < %s -mtriple=aarch64-none-linux-gnu --verify-machineinstrs -aarch64-b-offset-bits=9 -aarch64-tbz-offset-bits=6 -aarch64-cbz-offset-bits=6 -aarch64-bcc-offset-bits=6 | FileCheck %s
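; COM: The -aarch64-*-offset-bits flags above artificially shrink the branch
; COM: displacement ranges assumed by branch relaxation, so that relaxation
; COM: triggers even on these small test functions.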
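
; COM: relax_b_nospill: the branch out of %entry cannot reach %iffalse
; COM: directly, so it is relaxed into a short conditional branch plus an
; COM: unconditional branch; no scratch register needs to be spilled.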
define void @relax_b_nospill(i1 zeroext %0) {
; CHECK-LABEL: relax_b_nospill:
; CHECK:       // %bb.0:                               // %entry
; CHECK-NEXT:    tbnz w0,
; CHECK-SAME:                 LBB0_1
; CHECK-NEXT:  // %bb.3:                               // %entry
; CHECK-NEXT:          b      .LBB0_2
; CHECK-NEXT:  .LBB0_1:                                // %iftrue
; CHECK-NEXT:          //APP
; CHECK-NEXT:          .zero   2048
; CHECK-NEXT:          //NO_APP
; CHECK-NEXT:          ret
; CHECK-NEXT:  .LBB0_2:                                // %iffalse
; CHECK-NEXT:          //APP
; CHECK-NEXT:          .zero   8
; CHECK-NEXT:          //NO_APP
; CHECK-NEXT:          ret
entry:
  br i1 %0, label %iftrue, label %iffalse

iftrue:
  call void asm sideeffect ".space 2048", ""()
  ret void

iffalse:
  call void asm sideeffect ".space 8", ""()
  ret void
}
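
; COM: relax_b_spill: all of x0..x28 are live across the out-of-range branch,
; COM: so branch relaxation has to spill a scratch register around the far
; COM: branch (pre-indexed str before it, post-indexed ldr at its target).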
define void @relax_b_spill() {
; CHECK-LABEL:    relax_b_spill:                          // @relax_b_spill
; CHECK:          // %bb.0:                               // %entry
; CHECK-COUNT-5:          // 16-byte Folded Spill
; CHECK-NOT:              // 16-byte Folded Spill
; CHECK:                  //APP
; CHECK-COUNT-29:         mov     {{x[0-9]+}},
; CHECK-NOT:              mov     {{x[0-9]+}},
; CHECK-NEXT:             //NO_APP
; CHECK-NEXT:             b.eq    .LBB1_1
; CHECK-NEXT:     // %bb.4:                               // %entry
; CHECK-NEXT:             str     [[SPILL_REGISTER:x[0-9]+]], [sp,
; CHECK-SAME:                                                       -16]!
; CHECK-NEXT:             b       .LBB1_5
; CHECK-NEXT:     .LBB1_1:                                // %iftrue
; CHECK-NEXT:             //APP
; CHECK-NEXT:             .zero   2048
; CHECK-NEXT:             //NO_APP
; CHECK-NEXT:             b       .LBB1_3
; CHECK-NEXT:     .LBB1_5:                                // %iffalse
; CHECK-NEXT:             ldr     [[SPILL_REGISTER]], [sp],
; CHECK-SAME:                                                        16
; CHECK-NEXT:     // %bb.2:                               // %iffalse
; CHECK-NEXT:             //APP
; CHECK-COUNT-29:         // reg use {{x[0-9]+}}
; CHECK-NOT:              // reg use {{x[0-9]+}}
; CHECK-NEXT:             //NO_APP
; CHECK-NEXT:     .LBB1_3:                                // %common.ret
; CHECK-COUNT-5:          // 16-byte Folded Reload
; CHECK-NOT:              // 16-byte Folded Reload
; CHECK-NEXT:             ret
entry:
  %x0 = call i64 asm sideeffect "mov x0, 1", "={x0}"()
  %x1 = call i64 asm sideeffect "mov x1, 1", "={x1}"()
  %x2 = call i64 asm sideeffect "mov x2, 1", "={x2}"()
  %x3 = call i64 asm sideeffect "mov x3, 1", "={x3}"()
  %x4 = call i64 asm sideeffect "mov x4, 1", "={x4}"()
  %x5 = call i64 asm sideeffect "mov x5, 1", "={x5}"()
  %x6 = call i64 asm sideeffect "mov x6, 1", "={x6}"()
  %x7 = call i64 asm sideeffect "mov x7, 1", "={x7}"()
  %x8 = call i64 asm sideeffect "mov x8, 1", "={x8}"()
  %x9 = call i64 asm sideeffect "mov x9, 1", "={x9}"()
  %x10 = call i64 asm sideeffect "mov x10, 1", "={x10}"()
  %x11 = call i64 asm sideeffect "mov x11, 1", "={x11}"()
  %x12 = call i64 asm sideeffect "mov x12, 1", "={x12}"()
  %x13 = call i64 asm sideeffect "mov x13, 1", "={x13}"()
  %x14 = call i64 asm sideeffect "mov x14, 1", "={x14}"()
  %x15 = call i64 asm sideeffect "mov x15, 1", "={x15}"()
  %x16 = call i64 asm sideeffect "mov x16, 1", "={x16}"()
  %x17 = call i64 asm sideeffect "mov x17, 1", "={x17}"()
  %x18 = call i64 asm sideeffect "mov x18, 1", "={x18}"()
  %x19 = call i64 asm sideeffect "mov x19, 1", "={x19}"()
  %x20 = call i64 asm sideeffect "mov x20, 1", "={x20}"()
  %x21 = call i64 asm sideeffect "mov x21, 1", "={x21}"()
  %x22 = call i64 asm sideeffect "mov x22, 1", "={x22}"()
  %x23 = call i64 asm sideeffect "mov x23, 1", "={x23}"()
  %x24 = call i64 asm sideeffect "mov x24, 1", "={x24}"()
  %x25 = call i64 asm sideeffect "mov x25, 1", "={x25}"()
  %x26 = call i64 asm sideeffect "mov x26, 1", "={x26}"()
  %x27 = call i64 asm sideeffect "mov x27, 1", "={x27}"()
  %x28 = call i64 asm sideeffect "mov x28, 1", "={x28}"()
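
  ; Compare two of the values defined above and use every one of them in
  ; %iffalse, so that all of x0..x28 stay live across the branch.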
  %cmp = icmp eq i64 %x16, %x15
  br i1 %cmp, label %iftrue, label %iffalse

iftrue:
  call void asm sideeffect ".space 2048", ""()
  ret void

iffalse:
  call void asm sideeffect "# reg use $0", "{x0}"(i64 %x0)
  call void asm sideeffect "# reg use $0", "{x1}"(i64 %x1)
  call void asm sideeffect "# reg use $0", "{x2}"(i64 %x2)
  call void asm sideeffect "# reg use $0", "{x3}"(i64 %x3)
  call void asm sideeffect "# reg use $0", "{x4}"(i64 %x4)
  call void asm sideeffect "# reg use $0", "{x5}"(i64 %x5)
  call void asm sideeffect "# reg use $0", "{x6}"(i64 %x6)
  call void asm sideeffect "# reg use $0", "{x7}"(i64 %x7)
  call void asm sideeffect "# reg use $0", "{x8}"(i64 %x8)
  call void asm sideeffect "# reg use $0", "{x9}"(i64 %x9)
  call void asm sideeffect "# reg use $0", "{x10}"(i64 %x10)
  call void asm sideeffect "# reg use $0", "{x11}"(i64 %x11)
  call void asm sideeffect "# reg use $0", "{x12}"(i64 %x12)
  call void asm sideeffect "# reg use $0", "{x13}"(i64 %x13)
  call void asm sideeffect "# reg use $0", "{x14}"(i64 %x14)
  call void asm sideeffect "# reg use $0", "{x15}"(i64 %x15)
  call void asm sideeffect "# reg use $0", "{x16}"(i64 %x16)
  call void asm sideeffect "# reg use $0", "{x17}"(i64 %x17)
  call void asm sideeffect "# reg use $0", "{x18}"(i64 %x18)
  call void asm sideeffect "# reg use $0", "{x19}"(i64 %x19)
  call void asm sideeffect "# reg use $0", "{x20}"(i64 %x20)
  call void asm sideeffect "# reg use $0", "{x21}"(i64 %x21)
  call void asm sideeffect "# reg use $0", "{x22}"(i64 %x22)
  call void asm sideeffect "# reg use $0", "{x23}"(i64 %x23)
  call void asm sideeffect "# reg use $0", "{x24}"(i64 %x24)
  call void asm sideeffect "# reg use $0", "{x25}"(i64 %x25)
  call void asm sideeffect "# reg use $0", "{x26}"(i64 %x26)
  call void asm sideeffect "# reg use $0", "{x27}"(i64 %x27)
  call void asm sideeffect "# reg use $0", "{x28}"(i64 %x28)
  ret void
}
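
; COM: relax_b_x16_taken: as above, but here only x16 is live across the
; COM: out-of-range branch.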
define void @relax_b_x16_taken() {
; CHECK-LABEL:    relax_b_x16_taken:                      // @relax_b_x16_taken
; COM: Since the source of the out-of-range branch is hot and x16 is
; COM: taken, it makes sense to spill x16 and let the linker insert
; COM: fixup code for this branch rather than inflating the hot code
; COM: size by eagerly relaxing the unconditional branch.
; CHECK:          // %bb.0:                               // %entry
; CHECK-NEXT:             //APP
; CHECK-NEXT:             mov     x16, #1
; CHECK-NEXT:             //NO_APP
; CHECK-NEXT:             cbnz    x16, .LBB2_1
; CHECK-NEXT:     // %bb.3:                               // %entry
; CHECK-NEXT:             str     [[SPILL_REGISTER]], [sp,
; CHECK-SAME:                                                       -16]!
; CHECK-NEXT:             b       .LBB2_4
; CHECK-NEXT:     .LBB2_1:                                // %iftrue
; CHECK-NEXT:             //APP
; CHECK-NEXT:             .zero   2048
; CHECK-NEXT:             //NO_APP
; CHECK-NEXT:             ret
; CHECK-NEXT:     .LBB2_4:                                // %iffalse
; CHECK-NEXT:             ldr     [[SPILL_REGISTER]], [sp],
; CHECK-SAME:                                                        16
; CHECK-NEXT:     // %bb.2:                               // %iffalse
; CHECK-NEXT:             //APP
; CHECK-NEXT:             // reg use x16
; CHECK-NEXT:             //NO_APP
; CHECK-NEXT:             ret
entry:
  %x16 = call i64 asm sideeffect "mov x16, 1", "={x16}"()

  %cmp = icmp eq i64 %x16, 0
  br i1 %cmp, label %iffalse, label %iftrue

iftrue:
  call void asm sideeffect ".space 2048", ""()
  ret void

iffalse:
  call void asm sideeffect "# reg use $0", "{x16}"(i64 %x16)
  ret void
}

declare i32 @bar()
declare i32 @baz()